Migrated to community.amazon

pull/67091/head
Ansible Core Team 5 years ago committed by Matt Martz
parent 32bf1b8115
commit 58e8a91f4d

@ -1,219 +0,0 @@
# Ansible EC2 external inventory script settings
#
[ec2]
# to talk to a private eucalyptus instance uncomment these lines
# and edit eucalyptus_host to be the host name of your cloud controller
#eucalyptus = True
#eucalyptus_host = clc.cloud.domain.org
# AWS regions to make calls to. Set this to 'all' to make request to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not
# provide the 'regions_exclude' option. If this is set to 'auto', AWS_REGION or
# AWS_DEFAULT_REGION environment variable will be read to determine the region.
regions = all
regions_exclude = us-gov-west-1, cn-north-1
# When generating inventory, Ansible needs to know how to address a server.
# Each EC2 instance has a lot of variables associated with it. Here is the list:
# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# Below are 2 variables that are used as the address of a server:
# - destination_variable
# - vpc_destination_variable
# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
# address, and should set this to 'private_dns_name'. The key of an EC2 tag
# may optionally be used; however the boto instance variables hold precedence
# in the event of a collision.
destination_variable = public_dns_name
# This allows you to override the inventory_name with an ec2 variable, instead
# of using the destination_variable above. Addressing (aka ansible_ssh_host)
# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
#hostname_variable = tag_Name
# For server inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2. The key of an EC2 tag may optionally be used; however
# the boto instance variables hold precedence in the event of a collision.
# WARNING: - instances that are in the private vpc, _without_ public ip address
# will not be listed in the inventory until you set:
# vpc_destination_variable = private_ip_address
vpc_destination_variable = ip_address
# The following two settings allow flexible ansible host naming based on a
# python format string and a comma-separated list of ec2 tags. Note that:
#
# 1) If the tags referenced are not present for some instances, empty strings
# will be substituted in the format string.
# 2) This overrides both destination_variable and vpc_destination_variable.
#
#destination_format = {0}.{1}.example.com
#destination_format_tags = Name,environment
# To tag instances on EC2 with the resource records that point to them from
# Route53, set 'route53' to True.
route53 = False
# To use Route53 records as the inventory hostnames, uncomment and set
# to equal the domain name you wish to use. You must also have 'route53' (above)
# set to True.
# route53_hostnames = .example.com
# To exclude RDS instances from the inventory, uncomment and set to False.
#rds = False
# To exclude ElastiCache instances from the inventory, uncomment and set to False.
#elasticache = False
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com
# By default, only EC2 instances in the 'running' state are returned. Set
# 'all_instances' to True to return all instances regardless of state.
all_instances = False
# By default, only EC2 instances in the 'running' state are returned. Specify
# EC2 instance states to return as a comma-separated list. This
# option is overridden when 'all_instances' is True.
# instance_states = pending, running, shutting-down, terminated, stopping, stopped
# By default, only RDS instances in the 'available' state are returned. Set
# 'all_rds_instances' to True to return all RDS instances regardless of state.
all_rds_instances = False
# Include RDS cluster information (Aurora etc.)
include_rds_clusters = False
# By default, only ElastiCache clusters and nodes in the 'available' state
# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
# to True to return all ElastiCache clusters and nodes, regardless of state.
#
# Note that all_elasticache_nodes only applies to listed clusters. That means
# if you set all_elasticache_clusters to false, no node will be returned from
# unavailable clusters, regardless of their state and of what you set for
# all_elasticache_nodes.
all_elasticache_replication_groups = False
all_elasticache_clusters = False
all_elasticache_nodes = False
# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-ec2.cache
# - ansible-ec2.index
cache_path = ~/.ansible/tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300
# Organize groups into a nested/hierarchy instead of a flat namespace.
nested_groups = False
# Replace - tags when creating groups to avoid issues with ansible
replace_dash_in_groups = True
# If set to true, any tag of the form "a,b,c" is expanded into a list
# and the results are used to create additional tag_* inventory groups.
expand_csv_tags = False
# The EC2 inventory output can become very large. To manage its size,
# configure which groups should be created.
group_by_instance_id = True
group_by_region = True
group_by_availability_zone = True
group_by_aws_account = False
group_by_ami_id = True
group_by_instance_type = True
group_by_instance_state = False
group_by_platform = True
group_by_key_pair = True
group_by_vpc_id = True
group_by_security_group = True
group_by_tag_keys = True
group_by_tag_none = True
group_by_route53_names = True
group_by_rds_engine = True
group_by_rds_parameter_group = True
group_by_elasticache_engine = True
group_by_elasticache_cluster = True
group_by_elasticache_parameter_group = True
group_by_elasticache_replication_group = True
# If you only want to include hosts that match a certain regular expression
# pattern_include = staging-*
# If you want to exclude any hosts that match a certain regular expression
# pattern_exclude = staging-*
# Instance filters can be used to control which instances are retrieved for
# inventory. For the full list of possible filters, please read the EC2 API
# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
# Filters are key/value pairs separated by '=', to list multiple filters use
# a list separated by commas. To "AND" criteria together, use "&". Note that
# the "AND" is not useful along with stack_filters and so such usage is not allowed.
# See examples below.
# If you want to apply multiple filters simultaneously, set stack_filters to
# True. Default behaviour is to combine the results of all filters. Stacking
# allows the use of multiple conditions to filter down, for example by
# environment and type of host.
stack_filters = False
# Retrieve only instances with (key=value) env=staging tag
# instance_filters = tag:env=staging
# Retrieve only instances with role=webservers OR role=dbservers tag
# instance_filters = tag:role=webservers,tag:role=dbservers
# Retrieve only t1.micro instances OR instances with tag env=staging
# instance_filters = instance-type=t1.micro,tag:env=staging
# You can use wildcards in filter values also. Below will list instances which
# tag Name value matches webservers1*
# (ex. webservers15, webservers1a, webservers123 etc)
# instance_filters = tag:Name=webservers1*
# Retrieve only instances of type t1.micro that also have tag env=stage
# instance_filters = instance-type=t1.micro&tag:env=stage
# Retrieve instances of type t1.micro AND tag env=stage, as well as any instance
# that are of type m3.large, regardless of env tag
# instance_filters = instance-type=t1.micro&tag:env=stage,instance-type=m3.large
# An IAM role can be assumed, so all requests are run as that role.
# This can be useful for connecting across different accounts, or to limit user
# access
# iam_role = role-arn
# A boto configuration profile may be used to separate out credentials
# see https://boto.readthedocs.io/en/latest/boto_config_tut.html
# boto_profile = some-boto-profile-name
[credentials]
# The AWS credentials can optionally be specified here. Credentials specified
# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
# AWS_PROFILE is set, or if the boto_profile property above is set.
#
# Supplying AWS credentials here is not recommended, as it introduces
# non-trivial security concerns. When going down this route, please make sure
# to set access permissions for this file correctly, e.g. handle it the same
# way as you would a private SSH key.
#
# Unlike the boto and AWS configure files, this section does not support
# profiles.
#
# aws_access_key_id = AXXXXXXXXXXXXXX
# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX

File diff suppressed because it is too large Load Diff

@ -1 +0,0 @@
iam_server_certificate_info.py

@ -1,389 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(lambda_info) instead.
short_description: Gathers AWS Lambda function details as Ansible facts
description:
- Gathers various details related to Lambda functions, including aliases, versions and event source mappings.
Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases and
M(lambda_event) to manage lambda event source mappings.
version_added: "2.2"
options:
query:
description:
- Specifies the resource type for which to gather facts. Leave blank to retrieve all facts.
choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ]
default: "all"
type: str
function_name:
description:
- The name of the lambda function for which facts are requested.
aliases: [ "function", "name"]
type: str
event_source_arn:
description:
- For query type 'mappings', this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream.
type: str
author: Pierre Jodouin (@pjodouin)
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
# Simple example of listing all info for a function
- name: List all for a specific function
lambda_facts:
query: all
function_name: myFunction
register: my_function_details
# List all versions of a function
- name: List function versions
lambda_facts:
query: versions
function_name: myFunction
register: my_function_versions
# List all lambda function versions
- name: List all function
lambda_facts:
query: all
max_items: 20
- name: show Lambda facts
debug:
var: lambda_facts
'''
RETURN = '''
---
lambda_facts:
description: lambda facts
returned: success
type: dict
lambda_facts.function:
description: lambda function list
returned: success
type: dict
lambda_facts.function.TheName:
description: lambda function information, including event, mapping, and version information
returned: success
type: dict
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
import json
import datetime
import sys
import re
try:
from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
def fix_return(node):
    """
    Recursively convert an API response structure into JSON-safe values.

    datetime objects become their string representation; lists and dicts
    are walked recursively; every other value passes through untouched.

    :param node: arbitrary value taken from a boto3 response
    :return: the same structure with all datetimes stringified
    """
    if isinstance(node, dict):
        return dict((key, fix_return(value)) for key, value in node.items())
    if isinstance(node, list):
        return [fix_return(entry) for entry in node]
    if isinstance(node, datetime.datetime):
        return str(node)
    return node
def alias_details(client, module):
    """
    Return the aliases defined for a single lambda function.

    :param client: AWS API client reference (boto3)
    :param module: Ansible module reference
    :return dict: {function_name: {'aliases': [...]}} in snake_case
    """
    function_name = module.params.get('function_name')
    if not function_name:
        # Aliases can only be listed per-function; fail_json exits the module.
        module.fail_json(msg='Parameter function_name required for query=aliases.')

    api_params = dict()
    if module.params.get('max_items'):
        api_params['MaxItems'] = module.params.get('max_items')
    if module.params.get('next_marker'):
        api_params['Marker'] = module.params.get('next_marker')

    lambda_facts = dict()
    try:
        lambda_facts['aliases'] = client.list_aliases(FunctionName=function_name, **api_params)['Aliases']
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            # A missing function simply has no aliases.
            lambda_facts['aliases'] = []
        else:
            module.fail_json_aws(e, msg="Trying to get aliases")

    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
def all_details(client, module):
    """
    Collect every category of lambda facts in one pass.

    For a named function this merges config, alias, policy, version and
    mapping details under that function's key; otherwise it returns the
    configuration of every function in the region.

    :param client: AWS API client reference (boto3)
    :param module: Ansible module reference
    :return dict:
    """
    if module.params.get('max_items') or module.params.get('next_marker'):
        # Pagination parameters are ambiguous when several APIs are queried.
        module.fail_json(msg='Cannot specify max_items nor next_marker for query=all.')

    function_name = module.params.get('function_name')
    if not function_name:
        return dict(config_details(client, module))

    merged = {}
    # Each helper returns {function_name: {...}}; fold them all together,
    # in the same order the original queries were issued.
    for detail_func in (config_details, alias_details, policy_details, version_details, mapping_details):
        merged.update(detail_func(client, module)[function_name])
    return {function_name: merged}
def config_details(client, module):
    """
    Return configuration details for one or all lambda functions.

    With function_name set, the single function's configuration is returned
    keyed by its name; otherwise every function in the region is listed,
    keyed by FunctionName.

    :param client: AWS API client reference (boto3)
    :param module: Ansible module reference
    :return dict:
    """
    function_name = module.params.get('function_name')

    if not function_name:
        api_params = dict()
        if module.params.get('max_items'):
            api_params['MaxItems'] = module.params.get('max_items')
        if module.params.get('next_marker'):
            api_params['Marker'] = module.params.get('next_marker')

        try:
            function_list = client.list_functions(**api_params)['Functions']
        except ClientError as e:
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                function_list = []
            else:
                module.fail_json_aws(e, msg="Trying to get function list")
        # Key each function's snake_cased configuration by its name.
        return dict((func['FunctionName'], camel_dict_to_snake_dict(func)) for func in function_list)

    lambda_facts = dict()
    try:
        lambda_facts.update(client.get_function_configuration(FunctionName=function_name))
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            # Missing function: surface an empty placeholder instead of failing.
            lambda_facts['function'] = {}
        else:
            module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name))
    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
def mapping_details(client, module):
    """
    Return lambda event source mappings, optionally filtered.

    :param client: AWS API client reference (boto3)
    :param module: Ansible module reference
    :return dict: keyed by function name when one was given, flat otherwise
    """
    api_params = dict()
    function_name = module.params.get('function_name')
    if function_name:
        api_params['FunctionName'] = module.params.get('function_name')
    if module.params.get('event_source_arn'):
        api_params['EventSourceArn'] = module.params.get('event_source_arn')
    if module.params.get('max_items'):
        api_params['MaxItems'] = module.params.get('max_items')
    if module.params.get('next_marker'):
        api_params['Marker'] = module.params.get('next_marker')

    lambda_facts = dict()
    try:
        lambda_facts['mappings'] = client.list_event_source_mappings(**api_params)['EventSourceMappings']
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            lambda_facts['mappings'] = []
        else:
            module.fail_json_aws(e, msg="Trying to get source event mappings")

    snaked = camel_dict_to_snake_dict(lambda_facts)
    # Nest under the function name only when one was requested.
    return {function_name: snaked} if function_name else snaked
def policy_details(client, module):
    """
    Return the resource policy attached to a lambda function.

    :param client: AWS API client reference (boto3)
    :param module: Ansible module reference
    :return dict:
    """
    if module.params.get('max_items') or module.params.get('next_marker'):
        # Policies are single documents; pagination makes no sense here.
        module.fail_json(msg='Cannot specify max_items nor next_marker for query=policy.')

    function_name = module.params.get('function_name')
    if not function_name:
        module.fail_json(msg='Parameter function_name required for query=policy.')

    lambda_facts = dict()
    try:
        # get_policy returns a JSON string so must convert to dict before reassigning to its key
        lambda_facts['policy'] = json.loads(client.get_policy(FunctionName=function_name)['Policy'])
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            lambda_facts['policy'] = {}
        else:
            module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name))

    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
def version_details(client, module):
    """
    Return all published versions of a lambda function.

    :param client: AWS API client reference (boto3)
    :param module: Ansible module reference
    :return dict:
    """
    function_name = module.params.get('function_name')
    if not function_name:
        module.fail_json(msg='Parameter function_name required for query=versions.')

    api_params = dict()
    if module.params.get('max_items'):
        api_params['MaxItems'] = module.params.get('max_items')
    if module.params.get('next_marker'):
        api_params['Marker'] = module.params.get('next_marker')

    lambda_facts = dict()
    try:
        lambda_facts['versions'] = client.list_versions_by_function(FunctionName=function_name, **api_params)['Versions']
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            lambda_facts['versions'] = []
        else:
            module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name))

    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
def main():
    """
    Main entry point.

    Builds the Ansible module, validates parameters and dispatches to the
    handler matching the requested query type, exposing the result as
    ansible facts.

    :return dict: ansible facts
    """
    argument_spec = dict(
        function_name=dict(required=False, default=None, aliases=['function', 'name']),
        query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'),
        event_source_arn=dict(required=False, default=None)
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[],
        required_together=[]
    )

    # validate function_name if present
    function_name = module.params['function_name']
    if function_name:
        if not re.search(r"^[\w\-:]+$", function_name):
            # Message matches the regex: word chars (incl. underscore), hyphen, colon.
            module.fail_json(
                msg='Function name {0} is invalid. Names must contain only alphanumeric characters, underscores, hyphens and colons.'.format(function_name)
            )
        if len(function_name) > 64:
            module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    client = module.client('lambda')

    # Dispatch on function objects directly instead of resolving names
    # through sys.modules; a missing handler now fails at import time
    # rather than at run time.
    invocations = dict(
        aliases=alias_details,
        all=all_details,
        config=config_details,
        mappings=mapping_details,
        policy=policy_details,
        versions=version_details,
    )

    all_facts = fix_return(invocations[module.params['query']](client, module))

    results = dict(ansible_facts={'lambda_facts': {'function': all_facts}}, changed=False)
    if module.check_mode:
        results['msg'] = 'Check mode set but ignored for fact gathering only.'

    module.exit_json(**results)


if __name__ == '__main__':
    main()

@ -1,397 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
# Author:
# - Matthew Davis <Matthew.Davis.2@team.telstra.com>
# on behalf of Telstra Corporation Limited
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_acm
short_description: Upload and delete certificates in the AWS Certificate Manager service
description:
- Import and delete certificates in Amazon Web Service's Certificate Manager (AWS ACM).
- >
This module does not currently interact with AWS-provided certificates.
It currently only manages certificates provided to AWS by the user.
- The ACM API allows users to upload multiple certificates for the same domain name,
and even multiple identical certificates.
This module attempts to restrict such freedoms, to be idempotent, as per the Ansible philosophy.
It does this through applying AWS resource "Name" tags to ACM certificates.
- >
When I(state=present),
if there is one certificate in ACM
with a C(Name) tag equal to the C(name_tag) parameter,
and an identical body and chain,
this task will succeed without effect.
- >
When I(state=present),
if there is one certificate in ACM
with a I(Name) tag equal to the I(name_tag) parameter,
and a different body,
this task will overwrite that certificate.
- >
When I(state=present),
if there are multiple certificates in ACM
with a I(Name) tag equal to the I(name_tag) parameter,
this task will fail.
- >
When I(state=absent) and I(certificate_arn) is defined,
this module will delete the ACM resource with that ARN if it exists in this region,
and succeed without effect if it doesn't exist.
- >
When I(state=absent) and I(domain_name) is defined,
this module will delete all ACM resources in this AWS region with a corresponding domain name.
If there are none, it will succeed without effect.
- >
When I(state=absent) and I(certificate_arn) is not defined,
and I(domain_name) is not defined,
this module will delete all ACM resources in this AWS region with a corresponding I(Name) tag.
If there are none, it will succeed without effect.
- Note that this may not work properly with keys of size 4096 bits, due to a limitation of the ACM API.
version_added: "2.10"
options:
certificate:
description:
- The body of the PEM encoded public certificate.
- Required when I(state) is not C(absent).
- If your certificate is in a file, use C(lookup('file', 'path/to/cert.pem')).
type: str
certificate_arn:
description:
- The ARN of a certificate in ACM to delete
- Ignored when I(state=present).
- If I(state=absent), you must provide one of I(certificate_arn), I(domain_name) or I(name_tag).
- >
If I(state=absent) and no resource exists with this ARN in this region,
the task will succeed with no effect.
- >
If I(state=absent) and the corresponding resource exists in a different region,
this task may report success without deleting that resource.
type: str
aliases: [arn]
certificate_chain:
description:
- The body of the PEM encoded chain for your certificate.
- If your certificate chain is in a file, use C(lookup('file', 'path/to/chain.pem')).
- Ignored when I(state=absent)
type: str
domain_name:
description:
- The domain name of the certificate.
- >
If I(state=absent) and I(domain_name) is specified,
this task will delete all ACM certificates with this domain.
- Exactly one of I(domain_name), I(name_tag) and I(certificate_arn) must be provided.
- >
If I(state=present) this must not be specified.
(Since the domain name is encoded within the public certificate's body.)
type: str
aliases: [domain]
name_tag:
description:
- The unique identifier for tagging resources using AWS tags, with key I(Name).
- This can be any set of characters accepted by AWS for tag values.
- >
This is to ensure Ansible can treat certificates idempotently,
even though the ACM API allows duplicate certificates.
- If I(state=present), this must be specified.
- >
If I(state=absent), you must provide exactly one of
I(certificate_arn), I(domain_name) or I(name_tag).
type: str
aliases: [name]
private_key:
description:
- The body of the PEM encoded private key.
- Required when I(state=present).
- Ignored when I(state=absent).
- If your private key is in a file, use C(lookup('file', 'path/to/key.pem')).
type: str
state:
description:
- >
If I(state=present), the specified public certificate and private key
will be uploaded, with I(Name) tag equal to I(name_tag).
- >
If I(state=absent), any certificates in this region
with a corresponding I(domain_name), I(name_tag) or I(certificate_arn)
will be deleted.
choices: [present, absent]
default: present
type: str
requirements:
- boto3
author:
- Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: upload a self-signed certificate
aws_acm:
certificate: "{{ lookup('file', 'cert.pem' ) }}"
private_key: "{{ lookup('file', 'key.pem' ) }}"
name_tag: my_cert # to be applied through an AWS tag as "Name":"my_cert"
region: ap-southeast-2 # AWS region
- name: create/update a certificate with a chain
aws_acm:
certificate: "{{ lookup('file', 'cert.pem' ) }}"
private_key: "{{ lookup('file', 'key.pem' ) }}"
name_tag: my_cert
certificate_chain: "{{ lookup('file', 'chain.pem' ) }}"
state: present
region: ap-southeast-2
register: cert_create
- name: print ARN of cert we just created
debug:
var: cert_create.certificate.arn
- name: delete the cert we just created
aws_acm:
name_tag: my_cert
state: absent
region: ap-southeast-2
- name: delete a certificate with a particular ARN
aws_acm:
certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
state: absent
region: ap-southeast-2
- name: delete all certificates with a particular domain name
aws_acm:
domain_name: acm.ansible.com
state: absent
region: ap-southeast-2
'''
RETURN = '''
certificate:
description: Information about the certificate which was uploaded
type: complex
returned: when I(state=present)
contains:
arn:
description: The ARN of the certificate in ACM
type: str
returned: when I(state=present)
sample: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
domain_name:
description: The domain name encoded within the public certificate
type: str
returned: when I(state=present)
sample: acm.ansible.com
arns:
description: A list of the ARNs of the certificates in ACM which were deleted
type: list
elements: str
returned: when I(state=absent)
sample:
- "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.acm import ACMServiceManager
from ansible.module_utils._text import to_text
import base64
import re # regex library
def chain_compare(module, a, b):
    """
    Compare two PEM certificate chains for functional equality.

    Each argument is a PEM encoded certificate, or a chain of PEM encoded
    certificates, possibly with non-PEM text between entries
    (e.g. "Subject: ...").

    :param module: Ansible module reference, used only to report failures
    :return: True iff the chains/certs are identical, including chain order
    """
    chain_a = pem_chain_split(module, a)
    chain_b = pem_chain_split(module, b)
    if len(chain_a) != len(chain_b):
        return False
    # Compare certificate by certificate in DER form, so differences in
    # base64 line wrapping do not matter. The generator keeps the
    # original's lazy, stop-at-first-mismatch decoding behavior.
    return all(
        PEM_body_to_DER(module, cert_a) == PEM_body_to_DER(module, cert_b)
        for cert_a, cert_b in zip(chain_a, chain_b)
    )
def PEM_body_to_DER(module, pem):
    """
    Convert headerless PEM (base64) data to its equivalent DER bytes.

    :param module: Ansible module reference, used only to report failures
    :param pem: base64 payload of a PEM certificate, without BEGIN/END lines
    :return: decoded byte array; fails the module on undecodable input
    """
    try:
        return base64.b64decode(to_text(pem))
    except (ValueError, TypeError) as e:
        module.fail_json_aws(e, msg="Unable to decode certificate chain")
# Compiled once at module level to avoid repeated recompilation.
# Tolerates 5- or 6-dash BEGIN/END delimiters and any certificate label
# (e.g. "TRUSTED CERTIFICATE"); captures only the base64 body.
pem_chain_split_regex = re.compile(r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?")


def pem_chain_split(module, pem):
    """
    Split a PEM chain (or single certificate) into base64 payloads.

    Non-PEM data between certificates is ignored; only what lies between
    each BEGIN/END header pair is returned, headers excluded.

    :param module: Ansible module reference, used only to report failures
    :return: list of base64 strings, one per certificate
    """
    payloads = pem_chain_split_regex.findall(to_text(pem))
    if not payloads:
        # The regex matched nothing at all.
        module.fail_json(msg="Unable to split certificate chain. Possibly zero-length chain?")
    return payloads
def main():
    """Create/update (state=present) or delete (state=absent) a certificate in AWS ACM.

    present: requires certificate, name_tag and private_key; the certificate is
    imported only when it differs from what is already stored under that Name tag.
    absent: exactly one of certificate_arn, domain_name or name_tag selects the
    certificate(s) to delete.
    """
    argument_spec = dict(
        certificate=dict(),
        certificate_arn=dict(aliases=['arn']),
        certificate_chain=dict(),
        domain_name=dict(aliases=['domain']),
        name_tag=dict(aliases=['name']),
        private_key=dict(no_log=True),
        state=dict(default='present', choices=['present', 'absent'])
    )
    required_if = [
        ['state', 'present', ['certificate', 'name_tag', 'private_key']],
    ]
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
    acm = ACMServiceManager(module)

    # Check argument requirements
    if module.params['state'] == 'present':
        if module.params['certificate_arn']:
            module.fail_json(msg="Parameter 'certificate_arn' is only valid if parameter 'state' is specified as 'absent'")
    else:  # absent
        # exactly one of these should be specified
        absent_args = ['certificate_arn', 'domain_name', 'name_tag']
        if sum([(module.params[a] is not None) for a in absent_args]) != 1:
            for a in absent_args:
                module.debug("%s is %s" % (a, module.params[a]))
            # Fixed: the original message was missing the opening quote before certificate_arn.
            module.fail_json(msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified")

    if module.params['name_tag']:
        tags = dict(Name=module.params['name_tag'])
    else:
        tags = None

    client = module.client('acm')

    # fetch the list of certificates currently in ACM
    certificates = acm.get_certificates(client=client,
                                        module=module,
                                        domain_name=module.params['domain_name'],
                                        arn=module.params['certificate_arn'],
                                        only_tags=tags)

    module.debug("Found %d corresponding certificates in ACM" % len(certificates))

    if module.params['state'] == 'present':
        if len(certificates) > 1:
            # The Name tag must uniquely identify the certificate to update.
            msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params['name_tag']
            module.fail_json(msg=msg, certificates=certificates)
        elif len(certificates) == 1:
            # update the existing certificate
            module.debug("Existing certificate found in ACM")
            old_cert = certificates[0]  # existing cert in ACM
            if ('tags' not in old_cert) or ('Name' not in old_cert['tags']) or (old_cert['tags']['Name'] != module.params['name_tag']):
                # shouldn't happen
                module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert)
            if 'certificate' not in old_cert:
                # shouldn't happen
                module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert)

            # Are the existing certificate in ACM and the local certificate the same?
            same = True
            same &= chain_compare(module, old_cert['certificate'], module.params['certificate'])
            if module.params['certificate_chain']:
                # Need to test this
                # not sure if Amazon appends the cert itself to the chain when self-signed
                same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate_chain'])
            else:
                # When there is no chain with a cert
                # it seems Amazon returns the cert itself as the chain
                same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate'])

            if same:
                # Nothing to do; report the existing certificate unchanged.
                module.debug("Existing certificate in ACM is the same, doing nothing")
                domain = acm.get_domain_of_cert(client=client, module=module, arn=old_cert['certificate_arn'])
                module.exit_json(certificate=dict(domain_name=domain, arn=old_cert['certificate_arn']), changed=False)
            else:
                module.debug("Existing certificate in ACM is different, overwriting")
                # update cert in ACM by re-importing under the existing ARN
                arn = acm.import_certificate(client, module,
                                             certificate=module.params['certificate'],
                                             private_key=module.params['private_key'],
                                             certificate_chain=module.params['certificate_chain'],
                                             arn=old_cert['certificate_arn'],
                                             tags=tags)
                domain = acm.get_domain_of_cert(client=client, module=module, arn=arn)
                module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True)
        else:  # len(certificates) == 0
            module.debug("No certificate in ACM. Creating new one.")
            arn = acm.import_certificate(client=client,
                                         module=module,
                                         certificate=module.params['certificate'],
                                         private_key=module.params['private_key'],
                                         certificate_chain=module.params['certificate_chain'],
                                         tags=tags)
            domain = acm.get_domain_of_cert(client=client, module=module, arn=arn)
            module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True)

    else:  # state == absent
        # Delete every matching certificate; changed is False when none matched.
        for cert in certificates:
            acm.delete_certificate(client, module, cert['certificate_arn'])
        module.exit_json(arns=[cert['certificate_arn'] for cert in certificates],
                         changed=(len(certificates) > 0))


if __name__ == '__main__':
    # tests()
    main()

@ -1,299 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_acm_info
short_description: Retrieve certificate information from AWS Certificate Manager service
description:
- Retrieve information for ACM certificates
- This module was called C(aws_acm_facts) before Ansible 2.9. The usage did not change.
- Note that this will not return information about uploaded keys of size 4096 bits, due to a limitation of the ACM API.
version_added: "2.5"
options:
certificate_arn:
description:
- If provided, the results will be filtered to show only the certificate with this ARN.
- If no certificate with this ARN exists, this task will fail.
- If a certificate with this ARN exists in a different region, this task will fail
aliases:
- arn
version_added: '2.10'
type: str
domain_name:
description:
- The domain name of an ACM certificate to limit the search to
aliases:
- name
type: str
statuses:
description:
- Status to filter the certificate results
choices: ['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']
type: list
elements: str
tags:
description:
- Filter results to show only certificates with tags that match all the tags specified here.
type: dict
version_added: '2.10'
requirements:
- boto3
author:
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: obtain all ACM certificates
aws_acm_info:
- name: obtain all information for a single ACM certificate
aws_acm_info:
domain_name: "*.example.com"
- name: obtain all certificates pending validation
aws_acm_info:
statuses:
- PENDING_VALIDATION
- name: obtain all certificates with tag Name=foo and myTag=bar
aws_acm_info:
tags:
Name: foo
myTag: bar
# The output is still a list of certificates, just one item long.
- name: obtain information about a certificate with a particular ARN
aws_acm_info:
certificate_arn: "arn:aws:acm:ap-southeast-2:123456789876:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12"
'''
RETURN = '''
certificates:
description: A list of certificates
returned: always
type: complex
contains:
certificate:
description: The ACM Certificate body
returned: when certificate creation is complete
sample: '-----BEGIN CERTIFICATE-----\\nMII.....-----END CERTIFICATE-----\\n'
type: str
certificate_arn:
description: Certificate ARN
returned: always
sample: arn:aws:acm:ap-southeast-2:123456789012:certificate/abcd1234-abcd-1234-abcd-123456789abc
type: str
certificate_chain:
description: Full certificate chain for the certificate
returned: when certificate creation is complete
sample: '-----BEGIN CERTIFICATE-----\\nMII...\\n-----END CERTIFICATE-----\\n-----BEGIN CERTIFICATE-----\\n...'
type: str
created_at:
description: Date certificate was created
returned: always
sample: '2017-08-15T10:31:19+10:00'
type: str
domain_name:
description: Domain name for the certificate
returned: always
sample: '*.example.com'
type: str
domain_validation_options:
description: Options used by ACM to validate the certificate
returned: when certificate type is AMAZON_ISSUED
type: complex
contains:
domain_name:
description: Fully qualified domain name of the certificate
returned: always
sample: example.com
type: str
validation_domain:
description: The domain name ACM used to send validation emails
returned: always
sample: example.com
type: str
validation_emails:
description: A list of email addresses that ACM used to send domain validation emails
returned: always
sample:
- admin@example.com
- postmaster@example.com
type: list
elements: str
validation_status:
description: Validation status of the domain
returned: always
sample: SUCCESS
type: str
failure_reason:
description: Reason certificate request failed
returned: only when certificate issuing failed
type: str
sample: NO_AVAILABLE_CONTACTS
in_use_by:
description: A list of ARNs for the AWS resources that are using the certificate.
returned: always
sample: []
type: list
elements: str
issued_at:
description: Date certificate was issued
returned: always
sample: '2017-01-01T00:00:00+10:00'
type: str
issuer:
description: Issuer of the certificate
returned: always
sample: Amazon
type: str
key_algorithm:
description: Algorithm used to generate the certificate
returned: always
sample: RSA-2048
type: str
not_after:
description: Date after which the certificate is not valid
returned: always
sample: '2019-01-01T00:00:00+10:00'
type: str
not_before:
description: Date before which the certificate is not valid
returned: always
sample: '2017-01-01T00:00:00+10:00'
type: str
renewal_summary:
description: Information about managed renewal process
returned: when certificate is issued by Amazon and a renewal has been started
type: complex
contains:
domain_validation_options:
description: Options used by ACM to validate the certificate
returned: when certificate type is AMAZON_ISSUED
type: complex
contains:
domain_name:
description: Fully qualified domain name of the certificate
returned: always
sample: example.com
type: str
validation_domain:
description: The domain name ACM used to send validation emails
returned: always
sample: example.com
type: str
validation_emails:
description: A list of email addresses that ACM used to send domain validation emails
returned: always
sample:
- admin@example.com
- postmaster@example.com
type: list
elements: str
validation_status:
description: Validation status of the domain
returned: always
sample: SUCCESS
type: str
renewal_status:
description: Status of the domain renewal
returned: always
sample: PENDING_AUTO_RENEWAL
type: str
revocation_reason:
description: Reason for certificate revocation
returned: when the certificate has been revoked
sample: SUPERCEDED
type: str
revoked_at:
description: Date certificate was revoked
returned: when the certificate has been revoked
sample: '2017-09-01T10:00:00+10:00'
type: str
serial:
description: The serial number of the certificate
returned: always
sample: 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
type: str
signature_algorithm:
description: Algorithm used to sign the certificate
returned: always
sample: SHA256WITHRSA
type: str
status:
description: Status of the certificate in ACM
returned: always
sample: ISSUED
type: str
subject:
description: The name of the entity that is associated with the public key contained in the certificate
returned: always
sample: CN=*.example.com
type: str
subject_alternative_names:
description: Subject Alternative Names for the certificate
returned: always
sample:
- '*.example.com'
type: list
elements: str
tags:
description: Tags associated with the certificate
returned: always
type: dict
sample:
Application: helloworld
Environment: test
type:
description: The source of the certificate
returned: always
sample: AMAZON_ISSUED
type: str
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.acm import ACMServiceManager
def main():
    # All filters are optional and are ANDed together by get_certificates.
    argument_spec = dict(
        certificate_arn=dict(aliases=['arn']),
        domain_name=dict(aliases=['name']),
        statuses=dict(type='list', choices=['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']),
        tags=dict(type='dict'),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
    acm_info = ACMServiceManager(module)

    # Warn when invoked under the pre-2.9 "facts" alias of this module.
    if module._name == 'aws_acm_facts':
        module.deprecate("The 'aws_acm_facts' module has been renamed to 'aws_acm_info'", version='2.13')

    client = module.client('acm')

    # Fetch the certificates that match every supplied filter.
    certificates = acm_info.get_certificates(client, module,
                                             domain_name=module.params['domain_name'],
                                             statuses=module.params['statuses'],
                                             arn=module.params['certificate_arn'],
                                             only_tags=module.params['tags'])

    # When an explicit ARN was given it must resolve to exactly one
    # certificate in this region; anything else is an error.
    if module.params['certificate_arn'] and len(certificates) != 1:
        module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params['certificate_arn'])

    module.exit_json(certificates=certificates)


if __name__ == '__main__':
    main()

@ -1,375 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_api_gateway
short_description: Manage AWS API Gateway APIs
description:
- Allows for the management of API Gateway APIs
- Normally you should give the api_id since there is no other
stable guaranteed unique identifier for the API. If you do
not give api_id then a new API will be created each time
this is run.
- Beware that there are very hard limits on the rate that
you can call API Gateway's REST API. You may need to patch
your boto. See U(https://github.com/boto/boto3/issues/876)
and discuss with your AWS rep.
- swagger_file and swagger_text are passed directly on to AWS
transparently whilst swagger_dict is an ansible dict which is
converted to JSON before the API definitions are uploaded.
version_added: '2.4'
requirements: [ boto3 ]
options:
api_id:
description:
- The ID of the API you want to manage.
type: str
state:
description: Create or delete API Gateway.
default: present
choices: [ 'present', 'absent' ]
type: str
swagger_file:
description:
- JSON or YAML file containing swagger definitions for API.
Exactly one of swagger_file, swagger_text or swagger_dict must
be present.
type: path
aliases: ['src', 'api_file']
swagger_text:
description:
- Swagger definitions for API in JSON or YAML as a string direct
from playbook.
type: str
swagger_dict:
description:
- Swagger definitions API ansible dictionary which will be
converted to JSON and uploaded.
type: json
stage:
description:
- The name of the stage the API should be deployed to.
type: str
deploy_desc:
description:
- Description of the deployment - recorded and visible in the
AWS console.
default: Automatic deployment by Ansible.
type: str
cache_enabled:
description:
- Enable API GW caching of backend responses. Defaults to false.
type: bool
default: false
version_added: '2.10'
cache_size:
description:
- Size in GB of the API GW cache, becomes effective when cache_enabled is true.
choices: ['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']
type: str
default: '0.5'
version_added: '2.10'
stage_variables:
description:
- ENV variables for the stage. Define a dict of key values pairs for variables.
type: dict
version_added: '2.10'
stage_canary_settings:
description:
- Canary settings for the deployment of the stage.
- 'Dict with following settings:'
- 'percentTraffic: The percent (0-100) of traffic diverted to a canary deployment.'
- 'deploymentId: The ID of the canary deployment.'
- 'stageVariableOverrides: Stage variables overridden for a canary release deployment.'
- 'useStageCache: A Boolean flag to indicate whether the canary deployment uses the stage cache or not.'
- See docs U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/apigateway.html#APIGateway.Client.create_stage)
type: dict
version_added: '2.10'
tracing_enabled:
description:
- Specifies whether active tracing with X-ray is enabled for the API GW stage.
type: bool
version_added: '2.10'
endpoint_type:
description:
- Type of endpoint configuration, use C(EDGE) for an edge optimized API endpoint,
- C(REGIONAL) for just a regional deploy or PRIVATE for a private API.
- This flag will only be used when creating a new API Gateway setup, not for updates.
choices: ['EDGE', 'REGIONAL', 'PRIVATE']
type: str
default: EDGE
version_added: '2.10'
author:
- 'Michael De La Rue (@mikedlr)'
extends_documentation_fragment:
- aws
- ec2
notes:
- A future version of this module will probably use tags or another
ID so that an API can be created only once.
- As an early work around an intermediate version will probably do
the same using a tag embedded in the API name.
'''
EXAMPLES = '''
- name: Setup AWS API Gateway setup on AWS and deploy API definition
aws_api_gateway:
swagger_file: my_api.yml
stage: production
cache_enabled: true
cache_size: '1.6'
tracing_enabled: true
endpoint_type: EDGE
state: present
- name: Update API definition to deploy new version
aws_api_gateway:
api_id: 'abc123321cba'
swagger_file: my_api.yml
deploy_desc: Make auth fix available.
cache_enabled: true
cache_size: '1.6'
endpoint_type: EDGE
state: present
- name: Update API definitions and settings and deploy as canary
aws_api_gateway:
api_id: 'abc123321cba'
swagger_file: my_api.yml
cache_enabled: true
cache_size: '6.1'
canary_settings: { percentTraffic: 50.0, deploymentId: '123', useStageCache: True }
state: present
'''
RETURN = '''
api_id:
description: API id of the API endpoint created
returned: success
type: str
sample: '0ln4zq7p86'
configure_response:
description: AWS response from the API configure call
returned: success
type: dict
sample: { api_key_source: "HEADER", created_at: "2020-01-01T11:37:59+00:00", id: "0ln4zq7p86" }
deploy_response:
description: AWS response from the API deploy call
returned: success
type: dict
sample: { created_date: "2020-01-01T11:36:59+00:00", id: "rptv4b", description: "Automatic deployment by Ansible." }
resource_actions:
description: Actions performed against AWS API
returned: always
type: list
sample: ["apigateway:CreateRestApi", "apigateway:CreateDeployment", "apigateway:PutRestApi"]
'''
import json
try:
import botocore
except ImportError:
# HAS_BOTOCORE taken care of in AnsibleAWSModule
pass
import traceback
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict)
def main():
    """Create/update (state=present) or delete (state=absent) an API Gateway REST API."""
    argument_spec = dict(
        api_id=dict(type='str', required=False),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        swagger_file=dict(type='path', default=None, aliases=['src', 'api_file']),
        swagger_dict=dict(type='json', default=None),
        swagger_text=dict(type='str', default=None),
        stage=dict(type='str', default=None),
        deploy_desc=dict(type='str', default="Automatic deployment by Ansible."),
        cache_enabled=dict(type='bool', default=False),
        cache_size=dict(type='str', default='0.5', choices=['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']),
        stage_variables=dict(type='dict', default={}),
        stage_canary_settings=dict(type='dict', default={}),
        tracing_enabled=dict(type='bool', default=False),
        endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE'])
    )

    # Only one source of API definitions may be supplied.
    # (stale "# noqa: F841" removed: this IS used, just below.)
    mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']]

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        mutually_exclusive=mutually_exclusive,
    )

    api_id = module.params.get('api_id')
    state = module.params.get('state')
    swagger_file = module.params.get('swagger_file')
    swagger_dict = module.params.get('swagger_dict')
    swagger_text = module.params.get('swagger_text')
    endpoint_type = module.params.get('endpoint_type')

    client = module.client('apigateway')

    changed = True   # for now it will stay that way until we can sometimes avoid change
    conf_res = None
    dep_res = None
    del_res = None

    if state == "present":
        # Without an api_id a brand-new (placeholder) API is created first.
        if api_id is None:
            api_id = create_empty_api(module, client, endpoint_type)
        api_data = get_api_definitions(module, swagger_file=swagger_file,
                                       swagger_dict=swagger_dict, swagger_text=swagger_text)
        conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data)
    if state == "absent":
        del_res = delete_rest_api(module, client, api_id)

    exit_args = {"changed": changed, "api_id": api_id}

    # Only attach the responses that were actually produced on this run.
    if conf_res is not None:
        exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res)
    if dep_res is not None:
        exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res)
    if del_res is not None:
        exit_args['delete_response'] = camel_dict_to_snake_dict(del_res)

    module.exit_json(**exit_args)
def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_text=None):
    """Return the swagger/OpenAPI definition as a string.

    When several sources are supplied, later ones win: text overrides
    dict, which overrides file. Fails the module when no source was
    given or the file cannot be read.
    """
    definition = None

    if swagger_file is not None:
        try:
            with open(swagger_file) as definition_fh:
                definition = definition_fh.read()
        except OSError as e:
            module.fail_json(msg="Failed trying to read swagger file {0}: {1}".format(str(swagger_file), str(e)),
                             exception=traceback.format_exc())

    if swagger_dict is not None:
        # A dict must be serialised before it is handed to AWS.
        definition = json.dumps(swagger_dict)

    if swagger_text is not None:
        definition = swagger_text

    if definition is None:
        module.fail_json(msg='module error - no swagger info provided')
    return definition
def create_empty_api(module, client, endpoint_type):
    """
    Create a new, empty API ready to be configured and return its id.

    The description marks the API as incomplete; it is expected to be
    replaced once the real definitions are uploaded.
    """
    placeholder_desc = "Incomplete API creation by ansible aws_api_gateway module"
    try:
        response = create_api(client, name="ansible-temp-api", description=placeholder_desc, endpoint_type=endpoint_type)
    except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
        module.fail_json_aws(e, msg="creating API")
    return response["id"]
def delete_rest_api(module, client, api_id):
    """
    Delete the entire REST API setup and return the AWS response.
    """
    try:
        # delete_api wraps the boto call with retry/backoff handling.
        return delete_api(client, api_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
        module.fail_json_aws(e, msg="deleting API {0}".format(api_id))
def ensure_api_in_correct_state(module, client, api_id, api_data):
    """Make sure that we have the API configured and deployed as instructed.

    This function first configures the API correctly uploading the
    swagger definitions and then deploys those.  Configuration and
    deployment should be closely tied because there is only one set of
    definitions so if we stop, they may be updated by someone else and
    then we deploy the wrong configuration.

    Returns a (configure_response, deploy_response) tuple; the second
    element is None when no stage was requested.
    """
    configure_response = None
    try:
        configure_response = configure_api(client, api_id, api_data=api_data)
    except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
        module.fail_json_aws(e, msg="configuring API {0}".format(api_id))

    deploy_response = None

    # Deployment only happens when a target stage was supplied.
    stage = module.params.get('stage')
    if stage:
        try:
            deploy_response = create_deployment(client, api_id, **module.params)
        except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
            msg = "deploying api {0} to stage {1}".format(api_id, stage)
            module.fail_json_aws(e, msg)

    return configure_response, deploy_response
# Shared retry/backoff settings for all API Gateway calls — the service
# rate-limits its own management API aggressively.
retry_params = {"tries": 10, "delay": 5, "backoff": 1.2}


@AWSRetry.backoff(**retry_params)
def create_api(client, name=None, description=None, endpoint_type=None):
    """Create a REST API with the given name, description and endpoint type.

    Bug fix: the name parameter was previously accepted but ignored — the
    call always hardcoded "ansible-temp-api". The caller-supplied name is
    now honoured; when name is None the historical value is still used so
    existing behaviour is preserved.
    """
    return client.create_rest_api(
        name=name if name is not None else "ansible-temp-api",
        description=description,
        endpointConfiguration={'types': [endpoint_type]})
@AWSRetry.backoff(**retry_params)
def delete_api(client, api_id):
    # Remove the REST API identified by api_id (including its resources/deployments).
    return client.delete_rest_api(restApiId=api_id)
@AWSRetry.backoff(**retry_params)
def configure_api(client, api_id, api_data=None, mode="overwrite"):
    # Upload the swagger/OpenAPI definition; mode "overwrite" replaces the
    # whole API definition rather than merging into it.
    return client.put_rest_api(restApiId=api_id, mode=mode, body=api_data)
@AWSRetry.backoff(**retry_params)
def create_deployment(client, rest_api_id, **params):
    """Deploy the API to the stage named in params and return the AWS response.

    Refactored: the two near-identical create_deployment calls (with and
    without canarySettings) are collapsed into one kwargs dict; the
    canarySettings key is only included when stage_canary_settings is a
    non-empty dict, exactly as before.
    """
    deployment_kwargs = dict(
        restApiId=rest_api_id,
        stageName=params.get('stage'),
        description=params.get('deploy_desc'),
        cacheClusterEnabled=params.get('cache_enabled'),
        cacheClusterSize=params.get('cache_size'),
        variables=params.get('stage_variables'),
        tracingEnabled=params.get('tracing_enabled'),
    )

    canary_settings = params.get('stage_canary_settings')
    if canary_settings:
        # Passing an empty canarySettings dict would change AWS behaviour,
        # so it is only attached when the user actually configured one.
        deployment_kwargs['canarySettings'] = canary_settings

    return client.create_deployment(**deployment_kwargs)
if __name__ == '__main__':
main()

@ -1,543 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_application_scaling_policy
short_description: Manage Application Auto Scaling Scaling Policies
notes:
- for details of the parameters and returns see
U(http://boto3.readthedocs.io/en/latest/reference/services/application-autoscaling.html#ApplicationAutoScaling.Client.put_scaling_policy)
description:
- Creates, updates or removes a Scaling Policy
version_added: "2.5"
author:
- Gustavo Maia (@gurumaia)
- Chen Leibovich (@chenl87)
requirements: [ json, botocore, boto3 ]
options:
state:
description: Whether a policy should be present or absent
required: yes
choices: ['absent', 'present']
type: str
policy_name:
description: The name of the scaling policy.
required: yes
type: str
service_namespace:
description: The namespace of the AWS service.
required: yes
choices: ['ecs', 'elasticmapreduce', 'ec2', 'appstream', 'dynamodb']
type: str
resource_id:
description: The identifier of the resource associated with the scalable target.
required: yes
type: str
scalable_dimension:
description: The scalable dimension associated with the scalable target.
required: yes
choices: [ 'ecs:service:DesiredCount',
'ec2:spot-fleet-request:TargetCapacity',
'elasticmapreduce:instancegroup:InstanceCount',
'appstream:fleet:DesiredCapacity',
'dynamodb:table:ReadCapacityUnits',
'dynamodb:table:WriteCapacityUnits',
'dynamodb:index:ReadCapacityUnits',
'dynamodb:index:WriteCapacityUnits']
type: str
policy_type:
description: The policy type.
required: yes
choices: ['StepScaling', 'TargetTrackingScaling']
type: str
step_scaling_policy_configuration:
description: A step scaling policy. This parameter is required if you are creating a policy and the policy type is StepScaling.
required: no
type: dict
target_tracking_scaling_policy_configuration:
description:
- A target tracking policy. This parameter is required if you are creating a new policy and the policy type is TargetTrackingScaling.
- 'Full documentation of the suboptions can be found in the API documentation:'
- 'U(https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html)'
required: no
type: dict
suboptions:
CustomizedMetricSpecification:
description: The metric to use if using a customized metric.
type: dict
DisableScaleIn:
description: Whether scaling-in should be disabled.
type: bool
PredefinedMetricSpecification:
description: The metric to use if using a predefined metric.
type: dict
ScaleInCooldown:
description: The time (in seconds) to wait after scaling-in before another scaling action can occur.
type: int
ScaleOutCooldown:
description: The time (in seconds) to wait after scaling-out before another scaling action can occur.
type: int
TargetValue:
description: The target value for the metric
type: float
minimum_tasks:
description: The minimum value to scale to in response to a scale in event.
This parameter is required if you are creating a first new policy for the specified service.
required: no
version_added: "2.6"
type: int
maximum_tasks:
description: The maximum value to scale to in response to a scale out event.
This parameter is required if you are creating a first new policy for the specified service.
required: no
version_added: "2.6"
type: int
override_task_capacity:
description: Whether or not to override values of minimum and/or maximum tasks if it's already set.
required: no
default: no
type: bool
version_added: "2.6"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create step scaling policy for ECS Service
- name: scaling_policy
aws_application_scaling_policy:
state: present
policy_name: test_policy
service_namespace: ecs
resource_id: service/poc-pricing/test-as
scalable_dimension: ecs:service:DesiredCount
policy_type: StepScaling
minimum_tasks: 1
maximum_tasks: 6
step_scaling_policy_configuration:
AdjustmentType: ChangeInCapacity
StepAdjustments:
- MetricIntervalUpperBound: 123
ScalingAdjustment: 2
- MetricIntervalLowerBound: 123
ScalingAdjustment: -2
Cooldown: 123
MetricAggregationType: Average
# Create target tracking scaling policy for ECS Service
- name: scaling_policy
aws_application_scaling_policy:
state: present
policy_name: test_policy
service_namespace: ecs
resource_id: service/poc-pricing/test-as
scalable_dimension: ecs:service:DesiredCount
policy_type: TargetTrackingScaling
minimum_tasks: 1
maximum_tasks: 6
target_tracking_scaling_policy_configuration:
TargetValue: 60
PredefinedMetricSpecification:
PredefinedMetricType: ECSServiceAverageCPUUtilization
ScaleOutCooldown: 60
ScaleInCooldown: 60
# Remove scalable target for ECS Service
- name: scaling_policy
aws_application_scaling_policy:
state: absent
policy_name: test_policy
policy_type: StepScaling
service_namespace: ecs
resource_id: service/cluster-name/service-name
scalable_dimension: ecs:service:DesiredCount
'''
RETURN = '''
alarms:
description: List of the CloudWatch alarms associated with the scaling policy
returned: when state present
type: complex
contains:
alarm_arn:
description: The Amazon Resource Name (ARN) of the alarm
returned: when state present
type: str
alarm_name:
description: The name of the alarm
returned: when state present
type: str
service_namespace:
description: The namespace of the AWS service.
returned: when state present
type: str
sample: ecs
resource_id:
description: The identifier of the resource associated with the scalable target.
returned: when state present
type: str
sample: service/cluster-name/service-name
scalable_dimension:
description: The scalable dimension associated with the scalable target.
returned: when state present
type: str
sample: ecs:service:DesiredCount
policy_arn:
description: The Amazon Resource Name (ARN) of the scaling policy..
returned: when state present
type: str
policy_name:
description: The name of the scaling policy.
returned: when state present
type: str
policy_type:
description: The policy type.
returned: when state present
type: str
min_capacity:
description: The minimum value to scale to in response to a scale in event. Required if I(state) is C(present).
returned: when state present
type: int
sample: 1
max_capacity:
description: The maximum value to scale to in response to a scale out event. Required if I(state) is C(present).
returned: when state present
type: int
sample: 2
role_arn:
description: The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. Required if I(state) is C(present).
returned: when state present
type: str
sample: arn:aws:iam::123456789123:role/roleName
step_scaling_policy_configuration:
description: The step scaling policy.
returned: when state present and the policy type is StepScaling
type: complex
contains:
adjustment_type:
description: The adjustment type
returned: when state present and the policy type is StepScaling
type: str
sample: "ChangeInCapacity, PercentChangeInCapacity, ExactCapacity"
cooldown:
description: The amount of time, in seconds, after a scaling activity completes
where previous trigger-related scaling activities can influence future scaling events
returned: when state present and the policy type is StepScaling
type: int
sample: 60
metric_aggregation_type:
description: The aggregation type for the CloudWatch metrics
returned: when state present and the policy type is StepScaling
type: str
sample: "Average, Minimum, Maximum"
step_adjustments:
description: A set of adjustments that enable you to scale based on the size of the alarm breach
returned: when state present and the policy type is StepScaling
type: list
elements: dict
target_tracking_scaling_policy_configuration:
description: The target tracking policy.
returned: when state present and the policy type is TargetTrackingScaling
type: complex
contains:
predefined_metric_specification:
description: A predefined metric
returned: when state present and the policy type is TargetTrackingScaling
type: complex
contains:
predefined_metric_type:
description: The metric type
returned: when state present and the policy type is TargetTrackingScaling
type: str
sample: "ECSServiceAverageCPUUtilization, ECSServiceAverageMemoryUtilization"
resource_label:
description: Identifies the resource associated with the metric type
returned: when metric type is ALBRequestCountPerTarget
type: str
scale_in_cooldown:
description: The amount of time, in seconds, after a scale in activity completes before another scale in activity can start
returned: when state present and the policy type is TargetTrackingScaling
type: int
sample: 60
scale_out_cooldown:
description: The amount of time, in seconds, after a scale out activity completes before another scale out activity can start
returned: when state present and the policy type is TargetTrackingScaling
type: int
sample: 60
target_value:
description: The target value for the metric
returned: when state present and the policy type is TargetTrackingScaling
type: int
sample: 70
creation_time:
description: The Unix timestamp for when the scalable target was created.
returned: when state present
type: str
sample: '2017-09-28T08:22:51.881000-03:00'
''' # NOQA
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
def merge_results(scalable_target_result, policy_result):
    """Merge the scalable-target result with the policy create/delete result.

    Overlapping keys carry identical values in both responses here, so a
    plain dict update cannot lose information.

    :param scalable_target_result: dict with 'changed' (bool) and 'response' (dict)
    :param policy_result: dict with 'changed' (bool) and 'response' (dict)
    :return: dict with the combined 'changed' flag and merged 'response'
    """
    # Idiomatic boolean instead of an if/else assigning literal True/False.
    changed = bool(scalable_target_result['changed'] or policy_result['changed'])
    merged_response = scalable_target_result['response'].copy()
    merged_response.update(policy_result['response'])
    return {"changed": changed, "response": merged_response}
def delete_scaling_policy(connection, module):
    """Delete the named scaling policy if it exists.

    :param connection: boto3 Application Auto Scaling client
    :param module: AnsibleAWSModule providing parameters and error reporting
    :return: dict with a 'changed' flag
    """
    params = module.params
    try:
        existing = connection.describe_scaling_policies(
            ServiceNamespace=params.get('service_namespace'),
            ResourceId=params.get('resource_id'),
            ScalableDimension=params.get('scalable_dimension'),
            PolicyNames=[params.get('policy_name')],
            MaxResults=1
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe scaling policies")
    if not existing['ScalingPolicies']:
        # No matching policy registered; nothing to delete.
        return {"changed": False}
    try:
        connection.delete_scaling_policy(
            ServiceNamespace=params.get('service_namespace'),
            ResourceId=params.get('resource_id'),
            ScalableDimension=params.get('scalable_dimension'),
            PolicyName=params.get('policy_name'),
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to delete scaling policy")
    return {"changed": True}
def create_scalable_target(connection, module):
    """Register (or re-register) the scalable target for the resource.

    Registration occurs when no scalable target exists yet, or when one
    exists with different min/max capacity and override_task_capacity is set.

    :param connection: boto3 Application Auto Scaling client
    :param module: AnsibleAWSModule providing parameters and error reporting
    :return: dict with 'changed' flag and the snake_cased scalable target
    """
    params = module.params

    def describe_targets():
        # Shared lookup used for both the pre-check and the final response.
        try:
            return connection.describe_scalable_targets(
                ServiceNamespace=params.get('service_namespace'),
                ResourceIds=[params.get('resource_id')],
                ScalableDimension=params.get('scalable_dimension')
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to describe scalable targets")

    scalable_targets = describe_targets()
    targets = scalable_targets['ScalableTargets']

    # Register when absent, or when override is requested and capacities differ.
    needs_registration = not targets
    if not needs_registration and params.get('override_task_capacity'):
        current = targets[0]
        needs_registration = (
            current['MinCapacity'] != params.get('minimum_tasks')
            or current['MaxCapacity'] != params.get('maximum_tasks')
        )

    changed = False
    if needs_registration:
        changed = True
        try:
            connection.register_scalable_target(
                ServiceNamespace=params.get('service_namespace'),
                ResourceId=params.get('resource_id'),
                ScalableDimension=params.get('scalable_dimension'),
                MinCapacity=params.get('minimum_tasks'),
                MaxCapacity=params.get('maximum_tasks')
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to register scalable target")

    response = describe_targets()
    if response['ScalableTargets']:
        snaked_response = camel_dict_to_snake_dict(response['ScalableTargets'][0])
    else:
        snaked_response = {}
    return {"changed": changed, "response": snaked_response}
def create_scaling_policy(connection, module):
    """Create or update a scaling policy, returning the resulting policy.

    Describes the existing policy (if any), diffs it attribute-by-attribute
    against the module parameters, and issues put_scaling_policy only when
    something differs or no policy exists yet.

    :param connection: boto3 Application Auto Scaling client
    :param module: AnsibleAWSModule providing parameters and error reporting
    :return: dict with 'changed' flag and the snake_cased scaling policy
    """
    try:
        scaling_policy = connection.describe_scaling_policies(
            ServiceNamespace=module.params.get('service_namespace'),
            ResourceId=module.params.get('resource_id'),
            ScalableDimension=module.params.get('scalable_dimension'),
            PolicyNames=[module.params.get('policy_name')],
            MaxResults=1
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe scaling policies")
    changed = False
    if scaling_policy['ScalingPolicies']:
        # A policy already exists; fold the API response down to the policy
        # itself and overlay any module parameters that differ.
        scaling_policy = scaling_policy['ScalingPolicies'][0]
        # check if the input parameters are equal to what's already configured
        for attr in ('PolicyName',
                     'ServiceNamespace',
                     'ResourceId',
                     'ScalableDimension',
                     'PolicyType',
                     'StepScalingPolicyConfiguration',
                     'TargetTrackingScalingPolicyConfiguration'):
            # NOTE(review): attributes absent from the API response but set in
            # module params are not detected as a change here — confirm intended.
            if attr in scaling_policy and scaling_policy[attr] != module.params.get(_camel_to_snake(attr)):
                changed = True
                scaling_policy[attr] = module.params.get(_camel_to_snake(attr))
    else:
        # No existing policy: build the desired policy from module parameters.
        changed = True
        scaling_policy = {
            'PolicyName': module.params.get('policy_name'),
            'ServiceNamespace': module.params.get('service_namespace'),
            'ResourceId': module.params.get('resource_id'),
            'ScalableDimension': module.params.get('scalable_dimension'),
            'PolicyType': module.params.get('policy_type'),
            'StepScalingPolicyConfiguration': module.params.get('step_scaling_policy_configuration'),
            'TargetTrackingScalingPolicyConfiguration': module.params.get('target_tracking_scaling_policy_configuration')
        }
    if changed:
        try:
            # Exactly one of the two configuration styles is sent, chosen by
            # which configuration parameter the user supplied.
            if (module.params.get('step_scaling_policy_configuration')):
                connection.put_scaling_policy(
                    PolicyName=scaling_policy['PolicyName'],
                    ServiceNamespace=scaling_policy['ServiceNamespace'],
                    ResourceId=scaling_policy['ResourceId'],
                    ScalableDimension=scaling_policy['ScalableDimension'],
                    PolicyType=scaling_policy['PolicyType'],
                    StepScalingPolicyConfiguration=scaling_policy['StepScalingPolicyConfiguration']
                )
            elif (module.params.get('target_tracking_scaling_policy_configuration')):
                connection.put_scaling_policy(
                    PolicyName=scaling_policy['PolicyName'],
                    ServiceNamespace=scaling_policy['ServiceNamespace'],
                    ResourceId=scaling_policy['ResourceId'],
                    ScalableDimension=scaling_policy['ScalableDimension'],
                    PolicyType=scaling_policy['PolicyType'],
                    TargetTrackingScalingPolicyConfiguration=scaling_policy['TargetTrackingScalingPolicyConfiguration']
                )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to create scaling policy")
    # Re-describe so the returned response reflects what AWS actually stored.
    try:
        response = connection.describe_scaling_policies(
            ServiceNamespace=module.params.get('service_namespace'),
            ResourceId=module.params.get('resource_id'),
            ScalableDimension=module.params.get('scalable_dimension'),
            PolicyNames=[module.params.get('policy_name')],
            MaxResults=1
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe scaling policies")
    if (response['ScalingPolicies']):
        snaked_response = camel_dict_to_snake_dict(response['ScalingPolicies'][0])
    else:
        snaked_response = {}
    return {"changed": changed, "response": snaked_response}
def main():
    """Entry point: ensure (or remove) an Application Auto Scaling policy.

    For state=present a scalable target is registered first, then the
    scaling policy is created/updated; for state=absent the policy is
    deleted. Results are reported via module.exit_json.
    """
    argument_spec = dict(
        state=dict(type='str', required=True, choices=['present', 'absent']),
        policy_name=dict(type='str', required=True),
        service_namespace=dict(type='str', required=True,
                               choices=['appstream', 'dynamodb', 'ec2', 'ecs', 'elasticmapreduce']),
        resource_id=dict(type='str', required=True),
        scalable_dimension=dict(type='str',
                                required=True,
                                choices=['ecs:service:DesiredCount',
                                         'ec2:spot-fleet-request:TargetCapacity',
                                         'elasticmapreduce:instancegroup:InstanceCount',
                                         'appstream:fleet:DesiredCapacity',
                                         'dynamodb:table:ReadCapacityUnits',
                                         'dynamodb:table:WriteCapacityUnits',
                                         'dynamodb:index:ReadCapacityUnits',
                                         'dynamodb:index:WriteCapacityUnits']),
        policy_type=dict(type='str', required=True, choices=['StepScaling', 'TargetTrackingScaling']),
        step_scaling_policy_configuration=dict(type='dict'),
        target_tracking_scaling_policy_configuration=dict(
            type='dict',
            options=dict(
                CustomizedMetricSpecification=dict(type='dict'),
                DisableScaleIn=dict(type='bool'),
                PredefinedMetricSpecification=dict(type='dict'),
                ScaleInCooldown=dict(type='int'),
                ScaleOutCooldown=dict(type='int'),
                TargetValue=dict(type='float'),
            )
        ),
        minimum_tasks=dict(type='int'),
        maximum_tasks=dict(type='int'),
        override_task_capacity=dict(type='bool'),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
    connection = module.client('application-autoscaling')

    # Strip target-tracking suboptions left as None so they are not sent to AWS.
    tracking_config = module.params['target_tracking_scaling_policy_configuration']
    if isinstance(tracking_config, dict):
        for option in ('CustomizedMetricSpecification', 'DisableScaleIn',
                       'PredefinedMetricSpecification', 'ScaleInCooldown',
                       'ScaleOutCooldown', 'TargetValue'):
            if tracking_config[option] is None:
                del tracking_config[option]

    if module.params.get("state") == 'present':
        # A scalable target must be registered before a policy can reference it.
        scalable_target_result = create_scalable_target(connection, module)
        policy_result = create_scaling_policy(connection, module)
        # Mutual keys hold identical values, so merging the two results is safe.
        module.exit_json(**merge_results(scalable_target_result, policy_result))
    else:
        module.exit_json(**delete_scaling_policy(connection, module))


if __name__ == '__main__':
    main()

@ -1,490 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_batch_compute_environment
short_description: Manage AWS Batch Compute Environments
description:
- This module allows the management of AWS Batch Compute Environments.
It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions.
version_added: "2.5"
author: Jon Meran (@jonmer85)
options:
compute_environment_name:
description:
- The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, and underscores
are allowed.
required: true
type: str
type:
description:
- The type of the compute environment.
required: true
choices: ["MANAGED", "UNMANAGED"]
type: str
state:
description:
- Describes the desired state.
default: "present"
choices: ["present", "absent"]
type: str
compute_environment_state:
description:
- The state of the compute environment. If the state is ENABLED, then the compute environment accepts jobs
from a queue and can scale out automatically based on queues.
default: "ENABLED"
choices: ["ENABLED", "DISABLED"]
type: str
service_role:
description:
- The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS
services on your behalf.
required: true
type: str
compute_resource_type:
description:
- The type of compute resource.
required: true
choices: ["EC2", "SPOT"]
type: str
minv_cpus:
description:
- The minimum number of EC2 vCPUs that an environment should maintain.
required: true
type: int
maxv_cpus:
description:
- The maximum number of EC2 vCPUs that an environment can reach.
required: true
type: int
desiredv_cpus:
description:
- The desired number of EC2 vCPUS in the compute environment.
type: int
instance_types:
description:
- The instance types that may be launched.
required: true
type: list
elements: str
image_id:
description:
- The Amazon Machine Image (AMI) ID used for instances launched in the compute environment.
type: str
subnets:
description:
- The VPC subnets into which the compute resources are launched.
required: true
type: list
elements: str
security_group_ids:
description:
- The EC2 security groups that are associated with instances launched in the compute environment.
required: true
type: list
elements: str
ec2_key_pair:
description:
- The EC2 key pair that is used for instances launched in the compute environment.
type: str
instance_role:
description:
- The Amazon ECS instance role applied to Amazon EC2 instances in a compute environment.
required: true
type: str
tags:
description:
- Key-value pair tags to be applied to resources that are launched in the compute environment.
type: dict
bid_percentage:
description:
- The minimum percentage that a Spot Instance price must be when compared with the On-Demand price for that
instance type before instances are launched. For example, if your bid percentage is 20%, then the Spot price
must be below 20% of the current On-Demand price for that EC2 instance.
type: int
spot_iam_fleet_role:
description:
- The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment.
type: str
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: My Batch Compute Environment
aws_batch_compute_environment:
compute_environment_name: computeEnvironmentName
state: present
region: us-east-1
compute_environment_state: ENABLED
type: MANAGED
compute_resource_type: EC2
minv_cpus: 0
maxv_cpus: 2
desiredv_cpus: 1
instance_types:
- optimal
subnets:
- my-subnet1
- my-subnet2
security_group_ids:
- my-sg1
- my-sg2
instance_role: arn:aws:iam::<account>:instance-profile/<role>
tags:
tag1: value1
tag2: value2
service_role: arn:aws:iam::<account>:role/service-role/<role>
register: aws_batch_compute_environment_action
- name: show results
debug:
var: aws_batch_compute_environment_action
'''
RETURN = '''
---
output:
description: "returns what action was taken, whether something was changed, invocation and response"
returned: always
sample:
batch_compute_environment_action: none
changed: false
invocation:
module_args:
aws_access_key: ~
aws_secret_key: ~
bid_percentage: ~
compute_environment_name: <name>
compute_environment_state: ENABLED
compute_resource_type: EC2
desiredv_cpus: 0
ec2_key_pair: ~
ec2_url: ~
image_id: ~
instance_role: "arn:aws:iam::..."
instance_types:
- optimal
maxv_cpus: 8
minv_cpus: 0
profile: ~
region: us-east-1
security_group_ids:
- "*******"
security_token: ~
service_role: "arn:aws:iam::...."
spot_iam_fleet_role: ~
state: present
subnets:
- "******"
tags:
Environment: <name>
Name: <name>
type: MANAGED
validate_certs: true
response:
computeEnvironmentArn: "arn:aws:batch:...."
computeEnvironmentName: <name>
computeResources:
desiredvCpus: 0
instanceRole: "arn:aws:iam::..."
instanceTypes:
- optimal
maxvCpus: 8
minvCpus: 0
securityGroupIds:
- "******"
subnets:
- "*******"
tags:
Environment: <name>
Name: <name>
type: EC2
ecsClusterArn: "arn:aws:ecs:....."
serviceRole: "arn:aws:iam::..."
state: ENABLED
status: VALID
statusReason: "ComputeEnvironment Healthy"
type: MANAGED
type: dict
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
import re
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # Handled by AnsibleAWSModule
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
def set_api_params(module, module_params):
    """
    Sets module parameters to those expected by the boto3 API.

    Selects the named parameters, drops any that are None, and converts
    the remaining snake_case keys to camelCase.

    :param module: AnsibleAWSModule whose params are read
    :param module_params: iterable of parameter names to pass through
    :return: camelCase dict of the selected, non-None parameters
    """
    selected = {key: value for key, value in module.params.items()
                if key in module_params and value is not None}
    return snake_dict_to_camel_dict(selected)
def validate_params(module):
    """
    Performs basic parameter validation.

    Fails the module when the compute environment name contains characters
    outside the allowed set, or when a non-ARN name exceeds 128 characters.

    :param module: AnsibleAWSModule instance
    :return: None
    """
    name = module.params['compute_environment_name']
    # Name must consist solely of word characters, underscores and colons.
    if re.search(r'^[\w\_:]+$', name) is None:
        module.fail_json(
            msg="Function compute_environment_name {0} is invalid. Names must contain only alphanumeric characters "
                "and underscores.".format(name)
        )
    # ARNs carry their own length rules; only plain names get the 128 limit.
    is_arn = name.startswith('arn:aws:batch:')
    if not is_arn and len(name) > 128:
        module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit'
                         .format(name))
    return
# ---------------------------------------------------------------------------------------------------
#
# Batch Compute Environment functions
#
# ---------------------------------------------------------------------------------------------------
def get_current_compute_environment(module, client):
    """Look up the named compute environment.

    :param module: AnsibleAWSModule providing 'compute_environment_name'
    :param client: boto3 Batch client
    :return: the first matching compute environment dict, or None when
        absent or when the describe call fails client-side
    """
    try:
        environments = client.describe_compute_environments(
            computeEnvironments=[module.params['compute_environment_name']]
        )
    except ClientError:
        # Deliberate best-effort: a describe failure is treated as "not found".
        return None
    # Truthiness instead of the non-idiomatic len(...) > 0 check.
    if environments['computeEnvironments']:
        return environments['computeEnvironments'][0]
    return None
def create_compute_environment(module, client):
    """
    Adds a Batch compute environment.

    :param module: AnsibleAWSModule instance
    :param client: boto3 Batch client
    :return: True when a creation was (or, in check mode, would be) performed
    """
    # Top-level API parameters.
    api_params = set_api_params(module, ('compute_environment_name', 'type', 'service_role'))
    if module.params['compute_environment_state'] is not None:
        api_params['state'] = module.params['compute_environment_state']

    # Nested computeResources structure.
    resource_params = set_api_params(
        module,
        ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets',
         'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage',
         'spot_iam_fleet_role'))
    if module.params['compute_resource_type'] is not None:
        resource_params['type'] = module.params['compute_resource_type']
    api_params['computeResources'] = resource_params

    try:
        if not module.check_mode:
            client.create_compute_environment(**api_params)
        return True
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg='Error creating compute environment')
    return False
def remove_compute_environment(module, client):
    """
    Remove a Batch compute environment.

    :param module: AnsibleAWSModule instance
    :param client: boto3 Batch client
    :return: True when a deletion was (or, in check mode, would be) performed
    """
    delete_params = {'computeEnvironment': module.params['compute_environment_name']}
    try:
        if not module.check_mode:
            client.delete_compute_environment(**delete_params)
        return True
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg='Error removing compute environment')
    return False
def manage_state(module, client):
    """Converge the compute environment to the requested state.

    Creates, updates or deletes the environment as needed and reports the
    action taken.

    :param module: AnsibleAWSModule instance
    :param client: boto3 Batch client
    :return: dict(changed, batch_compute_environment_action, response)
    """
    changed = False
    current_state = 'absent'
    state = module.params['state']
    compute_environment_state = module.params['compute_environment_state']
    compute_environment_name = module.params['compute_environment_name']
    service_role = module.params['service_role']
    minv_cpus = module.params['minv_cpus']
    maxv_cpus = module.params['maxv_cpus']
    desiredv_cpus = module.params['desiredv_cpus']
    action_taken = 'none'
    update_env_response = ''
    check_mode = module.check_mode
    # check if the compute environment exists
    current_compute_environment = get_current_compute_environment(module, client)
    response = current_compute_environment
    if current_compute_environment:
        current_state = 'present'
    if state == 'present':
        if current_state == 'present':
            updates = False
            # Update Batch Compute Environment configuration
            compute_kwargs = {'computeEnvironment': compute_environment_name}
            # Top-level settings: state and service role.
            compute_resources = {}
            if compute_environment_state and current_compute_environment['state'] != compute_environment_state:
                compute_kwargs.update({'state': compute_environment_state})
                updates = True
            if service_role and current_compute_environment['serviceRole'] != service_role:
                compute_kwargs.update({'serviceRole': service_role})
                updates = True
            # vCPU settings live in the nested computeResources structure.
            if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus:
                compute_resources['minvCpus'] = minv_cpus
            if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus:
                compute_resources['maxvCpus'] = maxv_cpus
            if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus:
                compute_resources['desiredvCpus'] = desiredv_cpus
            if compute_resources:
                compute_kwargs['computeResources'] = compute_resources
                updates = True
            if updates:
                try:
                    if not check_mode:
                        update_env_response = client.update_compute_environment(**compute_kwargs)
                        # BUGFIX: the empty-response check must only apply when
                        # the API call was actually made; previously check mode
                        # always failed here because update_env_response was ''.
                        if not update_env_response:
                            module.fail_json(msg='Unable to get compute environment information after creating')
                    changed = True
                    action_taken = "updated"
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Unable to update environment.")
        else:
            # Create Batch Compute Environment
            changed = create_compute_environment(module, client)
            # Describe compute environment
            action_taken = 'added'
            response = get_current_compute_environment(module, client)
            if not response:
                module.fail_json(msg='Unable to get compute environment information after creating')
    else:
        if current_state == 'present':
            # remove the compute environment
            changed = remove_compute_environment(module, client)
            action_taken = 'deleted'
    return dict(changed=changed, batch_compute_environment_action=action_taken, response=response)
# ---------------------------------------------------------------------------------------------------
#
# MAIN
#
# ---------------------------------------------------------------------------------------------------
def main():
    """
    Main entry point.

    Builds the argument spec, validates parameters, converges the compute
    environment via manage_state, and exits with a snake_cased result.

    :return dict: changed, batch_compute_environment_action, response
    """
    argument_spec = dict(
        state=dict(default='present', choices=['present', 'absent']),
        compute_environment_name=dict(required=True),
        type=dict(required=True, choices=['MANAGED', 'UNMANAGED']),
        compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
        service_role=dict(required=True),
        compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']),
        minv_cpus=dict(type='int', required=True),
        maxv_cpus=dict(type='int', required=True),
        desiredv_cpus=dict(type='int'),
        instance_types=dict(type='list', required=True),
        image_id=dict(),
        subnets=dict(type='list', required=True),
        security_group_ids=dict(type='list', required=True),
        ec2_key_pair=dict(),
        instance_role=dict(required=True),
        tags=dict(type='dict'),
        bid_percentage=dict(type='int'),
        spot_iam_fleet_role=dict(),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    client = module.client('batch')
    # Fail fast on an invalid compute environment name before any API call.
    validate_params(module)
    results = manage_state(module, client)
    # Tags keep their original case; everything else is snake_cased for Ansible.
    module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags']))


if __name__ == '__main__':
    main()

@ -1,459 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_batch_job_definition
short_description: Manage AWS Batch Job Definitions
description:
- This module allows the management of AWS Batch Job Definitions.
It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions.
version_added: "2.5"
author: Jon Meran (@jonmer85)
options:
job_definition_arn:
description:
- The ARN for the job definition.
type: str
job_definition_name:
description:
- The name for the job definition.
required: true
type: str
state:
description:
- Describes the desired state.
default: "present"
choices: ["present", "absent"]
type: str
type:
description:
- The type of job definition.
required: true
type: str
parameters:
description:
- Default parameter substitution placeholders to set in the job definition. Parameters are specified as a
key-value pair mapping. Parameters in a SubmitJob request override any corresponding parameter defaults from
the job definition.
type: dict
image:
description:
- The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker
Hub registry are available by default. Other repositories are specified with `` repository-url /image <colon>tag ``.
Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes,
and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker
Remote API and the IMAGE parameter of docker run.
required: true
type: str
vcpus:
description:
- The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container
section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to
1,024 CPU shares.
required: true
type: int
memory:
description:
- The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory
specified here, the container is killed. This parameter maps to Memory in the Create a container section of the
Docker Remote API and the --memory option to docker run.
required: true
type: int
command:
description:
- The command that is passed to the container. This parameter maps to Cmd in the Create a container section of
the Docker Remote API and the COMMAND parameter to docker run. For more information,
see U(https://docs.docker.com/engine/reference/builder/#cmd).
type: list
elements: str
job_role_arn:
description:
- The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions.
type: str
volumes:
description:
- A list of data volumes used in a job.
suboptions:
host:
description:
- The contents of the host parameter determine whether your data volume persists on the host container
instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host
path for your data volume, but the data is not guaranteed to persist after the containers associated with
it stop running.
This is a dictionary with one property, sourcePath - The path on the host container
instance that is presented to the container. If this parameter is empty,then the Docker daemon has assigned
a host path for you. If the host parameter contains a sourcePath file location, then the data volume
persists at the specified location on the host container instance until you delete it manually. If the
sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the
location does exist, the contents of the source path folder are exported.
name:
description:
- The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are
allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.
type: list
elements: dict
environment:
description:
- The environment variables to pass to a container. This parameter maps to Env in the Create a container section
of the Docker Remote API and the --env option to docker run.
suboptions:
name:
description:
- The name of the key value pair. For environment variables, this is the name of the environment variable.
value:
description:
- The value of the key value pair. For environment variables, this is the value of the environment variable.
type: list
elements: dict
mount_points:
description:
- The mount points for data volumes in your container. This parameter maps to Volumes in the Create a container
section of the Docker Remote API and the --volume option to docker run.
suboptions:
containerPath:
description:
- The path on the container at which to mount the host volume.
readOnly:
description:
- If this value is true , the container has read-only access to the volume; otherwise, the container can write
to the volume. The default value is C(false).
sourceVolume:
description:
- The name of the volume to mount.
type: list
elements: dict
readonly_root_filesystem:
description:
- When this parameter is true, the container is given read-only access to its root file system. This parameter
maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option
to docker run.
type: str
privileged:
description:
- When this parameter is true, the container is given elevated privileges on the host container instance
(similar to the root user). This parameter maps to Privileged in the Create a container section of the
Docker Remote API and the --privileged option to docker run.
type: str
ulimits:
description:
- A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section
of the Docker Remote API and the --ulimit option to docker run.
suboptions:
hardLimit:
description:
- The hard limit for the ulimit type.
name:
description:
- The type of the ulimit.
softLimit:
description:
- The soft limit for the ulimit type.
type: list
elements: dict
user:
description:
- The user name to use inside the container. This parameter maps to User in the Create a container section of
the Docker Remote API and the --user option to docker run.
type: str
attempts:
description:
- Retry strategy - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10
attempts. If attempts is greater than one, the job is retried if it fails until it has moved to RUNNABLE that
many times.
type: int
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: My Batch Job Definition
aws_batch_job_definition:
job_definition_name: My Batch Job Definition
state: present
type: container
parameters:
Param1: Val1
Param2: Val2
image: <Docker Image URL>
vcpus: 1
memory: 512
command:
- python
- run_my_script.py
- arg1
job_role_arn: <Job Role ARN>
attempts: 3
register: job_definition_create_result
- name: show results
debug: var=job_definition_create_result
'''
RETURN = '''
---
output:
description: "returns what action was taken, whether something was changed, invocation and response"
returned: always
sample:
aws_batch_job_definition_action: none
changed: false
response:
job_definition_arn: "arn:aws:batch:...."
job_definition_name: <name>
status: INACTIVE
type: container
type: dict
'''
from ansible.module_utils.aws.batch import cc, set_api_params
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # Handled by AnsibleAWSModule
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
# logger = logging.getLogger()
# logging.basicConfig(filename='ansible_debug.log')
# logger.setLevel(logging.DEBUG)
def validate_params(module, batch_client):
    """
    Performs basic parameter validation.

    Currently a no-op placeholder: no job-definition parameter needs
    pre-flight validation beyond what the argument spec enforces.

    :param module: AnsibleAWSModule instance (unused)
    :param batch_client: boto3 Batch client (unused)
    :return: None
    """
    return None
# ---------------------------------------------------------------------------------------------------
#
# Batch Job Definition functions
#
# ---------------------------------------------------------------------------------------------------
def get_current_job_definition(module, batch_client):
    """Return the latest revision of the named job definition.

    :param module: AnsibleAWSModule providing 'job_definition_name'
    :param batch_client: boto3 Batch client
    :return: the job definition dict with the highest revision, or None
        when absent or when the describe call fails client-side
    """
    try:
        described = batch_client.describe_job_definitions(
            jobDefinitionName=module.params['job_definition_name']
        )
    except ClientError:
        # Deliberate best-effort: a describe failure is treated as "not found".
        return None
    definitions = described['jobDefinitions']
    if not definitions:
        return None
    # Single pass with max(key=...) instead of max-over-revisions followed
    # by a linear search for the matching entry.
    return max(definitions, key=lambda d: d['revision'])
def create_job_definition(module, batch_client):
    """
    Register a new revision of the Batch job definition.

    Honours check mode: the AWS call is skipped but ``changed`` is still
    reported, mirroring what a real run would do.

    :param module: AnsibleAWSModule with the job-definition parameters.
    :param batch_client: boto3 Batch client.
    :return: True when a registration happened (or would happen in check
        mode); fails the module on API errors.
    """
    # Build the register_job_definition payload from the module parameters,
    # nesting the retry and container sub-structures under the base params.
    api_params = set_api_params(module, get_base_params())
    container_properties = set_api_params(module, get_container_property_params())
    retry_strategy = set_api_params(module, get_retry_strategy_params())
    api_params['retryStrategy'] = retry_strategy
    api_params['containerProperties'] = container_properties

    try:
        if not module.check_mode:
            batch_client.register_job_definition(**api_params)
        return True
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Error registering job definition')
    return False
def get_retry_strategy_params():
    """Module parameter names that map into the Batch retryStrategy block."""
    return ('attempts',)
def get_container_property_params():
    """Module parameter names that map into the Batch containerProperties block."""
    return (
        'image',
        'vcpus',
        'memory',
        'command',
        'job_role_arn',
        'volumes',
        'environment',
        'mount_points',
        'readonly_root_filesystem',
        'privileged',
        'ulimits',
        'user',
    )
def get_base_params():
    """Module parameter names passed straight through to register_job_definition."""
    return ('job_definition_name', 'type', 'parameters')
def get_compute_environment_order_list(module):
    """
    Convert the compute_environment_order module parameter into the
    camelCase structure the Batch API expects.

    :param module: AnsibleAWSModule; reads params['compute_environment_order'].
    :return: list of {'order': ..., 'computeEnvironment': ...} dicts, in the
        same order as the input.
    """
    return [
        dict(order=entry['order'], computeEnvironment=entry['compute_environment'])
        for entry in module.params['compute_environment_order']
    ]
def remove_job_definition(module, batch_client):
    """
    Deregister the Batch job definition identified by job_definition_arn.

    Check mode skips the AWS call but still reports a change.

    :param module: AnsibleAWSModule; reads params['job_definition_arn'].
    :param batch_client: boto3 Batch client.
    :return: True when the definition was (or would be) deregistered;
        fails the module on API errors.
    """
    try:
        if not module.check_mode:
            batch_client.deregister_job_definition(
                jobDefinition=module.params['job_definition_arn']
            )
        return True
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Error removing job definition')
    return False
def job_definition_equal(module, current_definition):
    """
    Compare the module's desired parameters against an existing job
    definition as returned by describe_job_definitions.

    :param module: AnsibleAWSModule with the desired parameters.
    :param current_definition: job-definition dict from the AWS API.
    :return: True when every tracked base, container-property and
        retry-strategy parameter matches, else False.
    """
    # Guard against definitions missing the nested blocks: the previous
    # implementation called .get() on None and raised AttributeError for
    # such responses; it also kept comparing after the first mismatch.
    container_properties = current_definition.get('containerProperties') or {}
    retry_strategy = current_definition.get('retryStrategy') or {}

    for param in get_base_params():
        if module.params.get(param) != current_definition.get(cc(param)):
            return False
    for param in get_container_property_params():
        if module.params.get(param) != container_properties.get(cc(param)):
            return False
    for param in get_retry_strategy_params():
        if module.params.get(param) != retry_strategy.get(cc(param)):
            return False
    return True
def manage_state(module, batch_client):
    """
    Reconcile the desired state with the job definition currently in AWS.

    * state=present, definition exists: register a new revision only when a
      tracked parameter differs.
    * state=present, definition absent: register a fresh definition.
    * state=absent, definition exists: deregister it.

    :param module: AnsibleAWSModule instance.
    :param batch_client: boto3 Batch client.
    :return: dict with 'changed', 'batch_job_definition_action' and
        'response' (the describe result; None when nothing was looked up
        after a removal or no-op).
    """
    # Fix: dropped the unused locals job_definition_name and check_mode that
    # the previous version assigned but never read.
    changed = False
    action_taken = 'none'
    response = None

    # Determine whether a definition with this name already exists.
    current_job_definition = get_current_job_definition(module, batch_client)
    current_state = 'present' if current_job_definition else 'absent'

    if module.params['state'] == 'present':
        if current_state == 'present':
            # Register a new revision only when something actually differs.
            if not job_definition_equal(module, current_job_definition):
                create_job_definition(module, batch_client)
                action_taken = 'updated with new version'
                changed = True
        else:
            # Create the job definition from scratch.
            changed = create_job_definition(module, batch_client)
            action_taken = 'added'
        response = get_current_job_definition(module, batch_client)
        if not response:
            module.fail_json(msg='Unable to get job definition information after creating/updating')
    else:
        if current_state == 'present':
            # Remove the job definition.
            changed = remove_job_definition(module, batch_client)
            action_taken = 'deregistered'
    return dict(changed=changed, batch_job_definition_action=action_taken, response=response)
# ---------------------------------------------------------------------------------------------------
#
# MAIN
#
# ---------------------------------------------------------------------------------------------------
def main():
    """
    Module entry point: parse arguments, reconcile the job definition state
    and exit with the snake-cased result facts.
    """
    spec = dict(
        state=dict(required=False, default='present', choices=['present', 'absent']),
        job_definition_name=dict(required=True),
        job_definition_arn=dict(),
        type=dict(required=True),
        parameters=dict(type='dict'),
        image=dict(required=True),
        vcpus=dict(type='int', required=True),
        memory=dict(type='int', required=True),
        command=dict(type='list', default=[]),
        job_role_arn=dict(),
        volumes=dict(type='list', default=[]),
        environment=dict(type='list', default=[]),
        mount_points=dict(type='list', default=[]),
        readonly_root_filesystem=dict(),
        privileged=dict(),
        ulimits=dict(type='list', default=[]),
        user=dict(),
        attempts=dict(type='int')
    )
    module = AnsibleAWSModule(argument_spec=spec, supports_check_mode=True)

    client = module.client('batch')
    validate_params(module, client)
    module.exit_json(**camel_dict_to_snake_dict(manage_state(module, client)))
if __name__ == '__main__':
main()

@ -1,316 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_batch_job_queue
short_description: Manage AWS Batch Job Queues
description:
- This module allows the management of AWS Batch Job Queues.
It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions.
version_added: "2.5"
author: Jon Meran (@jonmer85)
options:
job_queue_name:
description:
- The name for the job queue
required: true
type: str
state:
description:
- Describes the desired state.
default: "present"
choices: ["present", "absent"]
type: str
job_queue_state:
description:
- The state of the job queue. If the job queue state is ENABLED , it is able to accept jobs.
default: "ENABLED"
choices: ["ENABLED", "DISABLED"]
type: str
priority:
description:
- The priority of the job queue. Job queues with a higher priority (or a lower integer value for the priority
parameter) are evaluated first when associated with same compute environment. Priority is determined in
ascending order, for example, a job queue with a priority value of 1 is given scheduling preference over a job
queue with a priority value of 10.
required: true
type: int
compute_environment_order:
description:
- The set of compute environments mapped to a job queue and their order relative to each other. The job
scheduler uses this parameter to determine which compute environment should execute a given job. Compute
environments must be in the VALID state before you can associate them with a job queue. You can associate up to
3 compute environments with a job queue.
required: true
type: list
elements: dict
suboptions:
order:
type: int
description: The relative priority of the environment.
compute_environment:
type: str
description: The name of the compute environment.
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: My Batch Job Queue
aws_batch_job_queue:
job_queue_name: jobQueueName
state: present
region: us-east-1
job_queue_state: ENABLED
priority: 1
compute_environment_order:
- order: 1
compute_environment: my_compute_env1
- order: 2
compute_environment: my_compute_env2
register: batch_job_queue_action
- name: show results
debug:
var: batch_job_queue_action
'''
RETURN = '''
---
output:
description: "returns what action was taken, whether something was changed, invocation and response"
returned: always
sample:
batch_job_queue_action: updated
changed: false
response:
job_queue_arn: "arn:aws:batch:...."
job_queue_name: <name>
priority: 1
state: DISABLED
status: UPDATING
status_reason: "JobQueue Healthy"
type: dict
'''
from ansible.module_utils.aws.batch import set_api_params
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # Handled by AnsibleAWSModule
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
def validate_params(module):
    """
    Hook for basic parameter validation.

    Currently a no-op; the argument_spec handles all validation.

    :param module: AnsibleAWSModule holding the parsed parameters (unused).
    :return: None
    """
    pass
# ---------------------------------------------------------------------------------------------------
#
# Batch Job Queue functions
#
# ---------------------------------------------------------------------------------------------------
def get_current_job_queue(module, client):
    """
    Fetch the job queue named in the module parameters.

    :param module: AnsibleAWSModule; reads params['job_queue_name'].
    :param client: boto3 Batch client.
    :return: the job-queue dict, or None when absent or the lookup fails.
    """
    try:
        described = client.describe_job_queues(
            jobQueues=[module.params['job_queue_name']]
        )
        queues = described['jobQueues']
        if queues:
            return queues[0]
        return None
    except ClientError:
        # Lookup failure is treated the same as "queue does not exist".
        return None
def create_job_queue(module, client):
    """
    Create a Batch job queue from the module parameters.

    Check mode skips the AWS call but still reports a change.

    :param module: AnsibleAWSModule with the job-queue parameters.
    :param client: boto3 Batch client.
    :return: True when the queue was (or would be) created; fails the
        module on API errors.
    """
    changed = False

    # Build the create_job_queue payload.
    params = ('job_queue_name', 'priority')
    api_params = set_api_params(module, params)

    if module.params['job_queue_state'] is not None:
        api_params['state'] = module.params['job_queue_state']

    api_params['computeEnvironmentOrder'] = get_compute_environment_order_list(module)

    try:
        if not module.check_mode:
            client.create_job_queue(**api_params)
        changed = True
    except (BotoCoreError, ClientError) as e:
        # BUG FIX: the previous message read 'Error creating compute
        # environment' (copy/paste from aws_batch_compute_environment);
        # this function creates a job queue.
        module.fail_json_aws(e, msg='Error creating job queue')

    return changed
def get_compute_environment_order_list(module):
    """
    Translate the compute_environment_order parameter into the camelCase
    list structure expected by the Batch API.

    :param module: AnsibleAWSModule; reads params['compute_environment_order'].
    :return: list of {'order': ..., 'computeEnvironment': ...} dicts,
        preserving input order.
    """
    order_list = []
    for entry in module.params['compute_environment_order']:
        order_list.append({
            'order': entry['order'],
            'computeEnvironment': entry['compute_environment'],
        })
    return order_list
def remove_job_queue(module, client):
    """
    Delete the job queue named in the module parameters.

    Check mode skips the AWS call but still reports a change.

    :param module: AnsibleAWSModule; reads params['job_queue_name'].
    :param client: boto3 Batch client.
    :return: True when the queue was (or would be) deleted; fails the
        module on API errors.
    """
    try:
        if not module.check_mode:
            client.delete_job_queue(jobQueue=module.params['job_queue_name'])
        return True
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Error removing job queue')
    return False
def manage_state(module, client):
    """
    Reconcile the desired state with the job queue currently in AWS.

    * state=present, queue exists: issue update_job_queue only when the
      state, priority or compute-environment order differ.
    * state=present, queue absent: create the queue.
    * state=absent, queue exists: delete it.

    :param module: AnsibleAWSModule instance.
    :param client: boto3 Batch client.
    :return: dict with 'changed', 'batch_job_queue_action' and 'response'
        (the describe result; None after a removal or no-op).
    """
    changed = False
    current_state = 'absent'
    state = module.params['state']
    job_queue_state = module.params['job_queue_state']
    job_queue_name = module.params['job_queue_name']
    priority = module.params['priority']
    action_taken = 'none'
    response = None
    check_mode = module.check_mode
    # check if the job queue exists
    current_job_queue = get_current_job_queue(module, client)
    if current_job_queue:
        current_state = 'present'
    if state == 'present':
        if current_state == 'present':
            updates = False
            # Accumulate only the fields that differ into the update call.
            job_kwargs = {'jobQueue': job_queue_name}
            # Queue state (ENABLED/DISABLED) changed?
            if job_queue_state and current_job_queue['state'] != job_queue_state:
                job_kwargs.update({'state': job_queue_state})
                updates = True
            # Scheduling priority changed?
            if priority is not None and current_job_queue['priority'] != priority:
                job_kwargs.update({'priority': priority})
                updates = True
            # Compute-environment order changed?
            # NOTE(review): this is an exact list comparison, so a purely
            # reordered list also counts as a change — presumably intended,
            # since order is significant to the Batch scheduler.
            new_compute_environment_order_list = get_compute_environment_order_list(module)
            if new_compute_environment_order_list != current_job_queue['computeEnvironmentOrder']:
                job_kwargs['computeEnvironmentOrder'] = new_compute_environment_order_list
                updates = True
            if updates:
                try:
                    # Check mode reports the change without calling AWS.
                    if not check_mode:
                        client.update_job_queue(**job_kwargs)
                    changed = True
                    action_taken = "updated"
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Unable to update job queue")
        else:
            # Create Job Queue
            changed = create_job_queue(module, client)
            action_taken = 'added'
        # Re-describe so the caller gets the post-change queue document.
        response = get_current_job_queue(module, client)
        if not response:
            module.fail_json(msg='Unable to get job queue information after creating/updating')
    else:
        if current_state == 'present':
            # remove the Job Queue
            changed = remove_job_queue(module, client)
            action_taken = 'deleted'
    return dict(changed=changed, batch_job_queue_action=action_taken, response=response)
# ---------------------------------------------------------------------------------------------------
#
# MAIN
#
# ---------------------------------------------------------------------------------------------------
def main():
    """
    Module entry point: parse arguments, reconcile the job queue state and
    exit with the snake-cased results.
    """
    spec = dict(
        state=dict(required=False, default='present', choices=['present', 'absent']),
        job_queue_name=dict(required=True),
        job_queue_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
        priority=dict(type='int', required=True),
        compute_environment_order=dict(type='list', required=True),
    )
    module = AnsibleAWSModule(argument_spec=spec, supports_check_mode=True)

    batch_client = module.client('batch')
    validate_params(module)
    module.exit_json(**camel_dict_to_snake_dict(manage_state(module, batch_client)))
if __name__ == '__main__':
main()

@ -1,408 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_codebuild
short_description: Create or delete an AWS CodeBuild project
notes:
- For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html).
description:
- Create or delete a CodeBuild projects on AWS, used for building code artifacts from source code.
version_added: "2.9"
author:
- Stefan Horning (@stefanhorning) <horning@mediapeers.com>
requirements: [ botocore, boto3 ]
options:
name:
description:
- Name of the CodeBuild project.
required: true
type: str
description:
description:
- Descriptive text of the CodeBuild project.
type: str
source:
description:
- Configure service and location for the build input source.
required: true
suboptions:
type:
description:
- "The type of the source. Allows one of these: C(CODECOMMIT), C(CODEPIPELINE), C(GITHUB), C(S3), C(BITBUCKET), C(GITHUB_ENTERPRISE)."
required: true
type: str
location:
description:
- Information about the location of the source code to be built. For type CODEPIPELINE location should not be specified.
type: str
git_clone_depth:
description:
- When using git you can specify the clone depth as an integer here.
type: int
buildspec:
description:
- The build spec declaration to use for the builds in this build project. Leave empty if part of the code project.
type: str
insecure_ssl:
description:
- Enable this flag to ignore SSL warnings while connecting to the project source code.
type: bool
type: dict
artifacts:
description:
- Information about the build output artifacts for the build project.
required: true
suboptions:
type:
description:
- "The type of build output for artifacts. Can be one of the following: C(CODEPIPELINE), C(NO_ARTIFACTS), C(S3)."
required: true
location:
description:
- Information about the build output artifact location. When choosing type S3, set the bucket name here.
path:
description:
- Along with namespace_type and name, the pattern that AWS CodeBuild will use to name and store the output artifacts.
- Used for path in S3 bucket when type is C(S3).
namespace_type:
description:
- Along with path and name, the pattern that AWS CodeBuild will use to determine the name and location to store the output artifacts.
- Accepts C(BUILD_ID) and C(NONE).
- "See docs here: U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project)."
name:
description:
- Along with path and namespace_type, the pattern that AWS CodeBuild will use to name and store the output artifact.
packaging:
description:
- The type of build output artifact to create on S3, can be NONE for creating a folder or ZIP for a ZIP file.
type: dict
cache:
description:
- Caching params to speed up following builds.
suboptions:
type:
description:
- Cache type. Can be C(NO_CACHE) or C(S3).
required: true
location:
description:
- Caching location on S3.
required: true
type: dict
environment:
description:
- Information about the build environment for the build project.
suboptions:
type:
description:
- The type of build environment to use for the project. Usually C(LINUX_CONTAINER).
required: true
image:
description:
- The ID of the Docker image to use for this build project.
required: true
compute_type:
description:
- Information about the compute resources the build project will use.
- "Available values include: C(BUILD_GENERAL1_SMALL), C(BUILD_GENERAL1_MEDIUM), C(BUILD_GENERAL1_LARGE)."
required: true
environment_variables:
description:
- A set of environment variables to make available to builds for the build project. List of dictionaries with name and value fields.
- "Example: { name: 'MY_ENV_VARIABLE', value: 'test' }"
privileged_mode:
description:
          - Enables running the Docker daemon inside a Docker container. Set to true only if the build project is to be used to build Docker images.
type: dict
service_role:
description:
- The ARN of the AWS IAM role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
type: str
timeout_in_minutes:
description:
- How long CodeBuild should wait until timing out any build that has not been marked as completed.
default: 60
type: int
encryption_key:
description:
- The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.
type: str
tags:
description:
- A set of tags for the build project.
type: list
elements: dict
suboptions:
key:
description: The name of the Tag.
type: str
value:
description: The value of the Tag.
type: str
vpc_config:
description:
- The VPC config enables AWS CodeBuild to access resources in an Amazon VPC.
type: dict
state:
description:
- Create or remove code build project.
default: 'present'
choices: ['present', 'absent']
type: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- aws_codebuild:
name: my_project
description: My nice little project
service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role"
source:
# Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3
type: CODEPIPELINE
buildspec: ''
artifacts:
namespaceType: NONE
packaging: NONE
type: CODEPIPELINE
name: my_project
environment:
computeType: BUILD_GENERAL1_SMALL
privilegedMode: "true"
image: "aws/codebuild/docker:17.09.0"
type: LINUX_CONTAINER
environmentVariables:
- { name: 'PROFILE', value: 'staging' }
encryption_key: "arn:aws:kms:us-east-1:123123:alias/aws/s3"
region: us-east-1
state: present
'''
RETURN = '''
project:
description: Returns the dictionary describing the code project configuration.
returned: success
type: complex
contains:
name:
description: Name of the CodeBuild project
returned: always
type: str
sample: my_project
arn:
description: ARN of the CodeBuild project
returned: always
type: str
sample: arn:aws:codebuild:us-east-1:123123123:project/vod-api-app-builder
description:
description: A description of the build project
returned: always
type: str
sample: My nice little project
source:
description: Information about the build input source code.
returned: always
type: complex
contains:
type:
description: The type of the repository
returned: always
type: str
sample: CODEPIPELINE
location:
description: Location identifier, depending on the source type.
returned: when configured
type: str
git_clone_depth:
description: The git clone depth
returned: when configured
type: int
build_spec:
description: The build spec declaration to use for the builds in this build project.
returned: always
type: str
auth:
description: Information about the authorization settings for AWS CodeBuild to access the source code to be built.
returned: when configured
type: complex
insecure_ssl:
description: True if set to ignore SSL warnings.
returned: when configured
type: bool
artifacts:
description: Information about the output of build artifacts
returned: always
type: complex
contains:
type:
description: The type of build artifact.
returned: always
type: str
sample: CODEPIPELINE
location:
description: Output location for build artifacts
returned: when configured
type: str
# and more... see http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project
cache:
description: Cache settings for the build project.
returned: when configured
type: dict
environment:
description: Environment settings for the build
returned: always
type: dict
service_role:
description: IAM role to be used during build to access other AWS services.
returned: always
type: str
sample: arn:aws:iam::123123123:role/codebuild-service-role
timeout_in_minutes:
description: The timeout of a build in minutes
returned: always
type: int
sample: 60
tags:
description: Tags added to the project
returned: when configured
type: list
created:
description: Timestamp of the create time of the project
returned: always
type: str
sample: "2018-04-17T16:56:03.245000+02:00"
'''
from ansible.module_utils.aws.core import AnsibleAWSModule, get_boto3_client_method_parameters
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
def create_or_update_project(client, params, module):
    """
    Create the CodeBuild project, or update it when one with the same name
    already exists.

    :param client: boto3 CodeBuild client.
    :param params: raw (snake_case) module parameters; must include 'name'.
    :param module: AnsibleAWSModule, used for error reporting.
    :return: (api response, changed) tuple.
    """
    resp = {}
    name = params['name']
    # clean up params: drop unset values, camelCase the keys, and keep only
    # the keys each boto3 call actually accepts.
    formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None))
    permitted_create_params = get_boto3_client_method_parameters(client, 'create_project')
    permitted_update_params = get_boto3_client_method_parameters(client, 'update_project')
    formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params)
    formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params)
    # Check if project with that name already exists and if so update existing:
    # NOTE(review): update_project is always called for an existing project;
    # 'changed' is decided afterwards by diffing the before/after documents.
    found = describe_project(client=client, name=name, module=module)
    changed = False
    if 'name' in found:
        found_project = found
        resp = update_project(client=client, params=formatted_update_params, module=module)
        updated_project = resp['project']
        # Prep both dicts for sensible change comparison: lastModified always
        # differs, and a missing tags key is normalised to an empty list.
        # NOTE(review): pop() without a default assumes 'lastModified' is
        # present in both documents — TODO confirm against the API contract.
        found_project.pop('lastModified')
        updated_project.pop('lastModified')
        if 'tags' not in updated_project:
            updated_project['tags'] = []
        if updated_project != found_project:
            changed = True
        return resp, changed
    # Or create new project:
    try:
        resp = client.create_project(**formatted_create_params)
        changed = True
        return resp, changed
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to create CodeBuild project")
def update_project(client, params, module):
    """
    Call CodeBuild update_project with the prepared (camelCase) parameters.

    :param client: boto3 CodeBuild client.
    :param params: keyword arguments for update_project (includes 'name').
    :param module: AnsibleAWSModule, used for error reporting.
    :return: the raw update_project response; fails the module on API errors.
    """
    # Fix: removed the unused local `name = params['name']` the previous
    # version assigned but never read.
    try:
        return client.update_project(**params)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to update CodeBuild project")
def delete_project(client, name, module):
    """
    Delete a CodeBuild project by name.

    :param client: boto3 CodeBuild client.
    :param name: project name.
    :param module: AnsibleAWSModule, used for error reporting.
    :return: (delete_project response, changed); changed is True only when
        a project with that name existed before the delete call.
    """
    # A change is reported only when the project existed beforehand.
    existing = describe_project(client=client, name=name, module=module)
    changed = 'name' in existing
    try:
        response = client.delete_project(name=name)
        return response, changed
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to delete CodeBuild project")
def describe_project(client, name, module):
    """
    Fetch a CodeBuild project description by name.

    :param client: boto3 CodeBuild client.
    :param name: project name to look up.
    :param module: AnsibleAWSModule, used for error reporting.
    :return: the project dict, or {} when no such project exists; fails the
        module on API errors.
    """
    try:
        matches = client.batch_get_projects(names=[name])['projects']
        if matches:
            return matches[0]
        return {}
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to describe CodeBuild projects")
def main():
    """
    Module entry point: create/update or delete the CodeBuild project and
    exit with the snake-cased API response.
    """
    spec = dict(
        name=dict(required=True),
        description=dict(),
        source=dict(required=True, type='dict'),
        artifacts=dict(required=True, type='dict'),
        cache=dict(type='dict'),
        environment=dict(type='dict'),
        service_role=dict(),
        timeout_in_minutes=dict(type='int', default=60),
        encryption_key=dict(),
        tags=dict(type='list'),
        vpc_config=dict(type='dict'),
        state=dict(choices=['present', 'absent'], default='present')
    )
    module = AnsibleAWSModule(argument_spec=spec)
    client_conn = module.client('codebuild')

    state = module.params.get('state')
    changed = False
    if state == 'present':
        project_result, changed = create_or_update_project(
            client=client_conn,
            params=module.params,
            module=module)
    elif state == 'absent':
        project_result, changed = delete_project(
            client=client_conn,
            name=module.params['name'],
            module=module)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(project_result))
if __name__ == '__main__':
main()

@ -1,247 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Shuang Wang <ooocamel@icloud.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: aws_codecommit
version_added: "2.8"
short_description: Manage repositories in AWS CodeCommit
description:
- Supports creation and deletion of CodeCommit repositories.
- See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit.
author: Shuang Wang (@ptux)
requirements:
- botocore
- boto3
- python >= 2.6
options:
name:
description:
- name of repository.
required: true
type: str
description:
description:
- description or comment of repository.
required: false
aliases:
- comment
type: str
state:
description:
- Specifies the state of repository.
required: true
choices: [ 'present', 'absent' ]
type: str
extends_documentation_fragment:
- aws
- ec2
'''
RETURN = '''
repository_metadata:
description: "Information about the repository."
returned: always
type: complex
contains:
account_id:
description: "The ID of the AWS account associated with the repository."
returned: when state is present
type: str
sample: "268342293637"
arn:
description: "The Amazon Resource Name (ARN) of the repository."
returned: when state is present
type: str
sample: "arn:aws:codecommit:ap-northeast-1:268342293637:username"
clone_url_http:
description: "The URL to use for cloning the repository over HTTPS."
returned: when state is present
type: str
sample: "https://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
clone_url_ssh:
description: "The URL to use for cloning the repository over SSH."
returned: when state is present
type: str
sample: "ssh://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
creation_date:
description: "The date and time the repository was created, in timestamp format."
returned: when state is present
type: str
sample: "2018-10-16T13:21:41.261000+09:00"
last_modified_date:
description: "The date and time the repository was last modified, in timestamp format."
returned: when state is present
type: str
sample: "2018-10-16T13:21:41.261000+09:00"
repository_description:
description: "A comment or description about the repository."
returned: when state is present
type: str
sample: "test from ptux"
repository_id:
description: "The ID of the repository that was created or deleted"
returned: always
type: str
sample: "e62a5c54-i879-497b-b62f-9f99e4ebfk8e"
repository_name:
description: "The repository's name."
returned: when state is present
type: str
sample: "reponame"
response_metadata:
description: "Information about the response."
returned: always
type: complex
contains:
http_headers:
description: "http headers of http response"
returned: always
type: dict
http_status_code:
description: "http status code of http response"
returned: always
type: str
sample: "200"
request_id:
description: "http request id"
returned: always
type: str
sample: "fb49cfca-d0fa-11e8-85cb-b3cc4b5045ef"
retry_attempts:
description: "numbers of retry attempts"
returned: always
type: str
sample: "0"
'''
EXAMPLES = '''
# Create a new repository
- aws_codecommit:
name: repo
state: present
# Delete a repository
- aws_codecommit:
name: repo
state: absent
'''
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
class CodeCommit(object):
    """
    Idempotent create/update/delete of a single AWS CodeCommit repository.

    All AWS failures are routed through AnsibleAWSModule.fail_json_aws, so
    the private helpers never raise to the caller.
    """

    def __init__(self, module=None):
        self._module = module
        self._client = self._module.client('codecommit')
        self._check_mode = self._module.check_mode

    def process(self):
        """
        Reconcile the repository with the requested state.

        :return: result dict with 'changed' plus, for state=present on an
            existing repository, the get_repository response fields.
        """
        result = dict(changed=False)

        if self._module.params['state'] == 'present':
            if not self._repository_exists():
                if not self._check_mode:
                    result = self._create_repository()
                result['changed'] = True
            else:
                # Repository exists; only its description can be updated.
                metadata = self._get_repository()['repositoryMetadata']
                if metadata['repositoryDescription'] != self._module.params['description']:
                    if not self._check_mode:
                        self._update_repository()
                    result['changed'] = True
                result.update(self._get_repository())
        if self._module.params['state'] == 'absent' and self._repository_exists():
            if not self._check_mode:
                result = self._delete_repository()
            result['changed'] = True
        return result

    def _repository_exists(self):
        """Return True when a repository with the configured name exists."""
        try:
            paginator = self._client.get_paginator('list_repositories')
            for page in paginator.paginate():
                repositories = page['repositories']
                for item in repositories:
                    if self._module.params['name'] in item.values():
                        return True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="couldn't get repository")
        return False

    def _get_repository(self):
        """Fetch the repository metadata; fails the module on API errors."""
        try:
            result = self._client.get_repository(
                repositoryName=self._module.params['name']
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="couldn't get repository")
        return result

    def _update_repository(self):
        """Update the repository description; fails the module on API errors."""
        try:
            result = self._client.update_repository_description(
                repositoryName=self._module.params['name'],
                repositoryDescription=self._module.params['description']
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            # BUG FIX: the previous message said "couldn't create repository"
            # (copy/paste from _create_repository); this path updates one.
            self._module.fail_json_aws(e, msg="couldn't update repository")
        return result

    def _create_repository(self):
        """Create the repository; fails the module on API errors."""
        try:
            result = self._client.create_repository(
                repositoryName=self._module.params['name'],
                repositoryDescription=self._module.params['description']
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="couldn't create repository")
        return result

    def _delete_repository(self):
        """Delete the repository; fails the module on API errors."""
        try:
            result = self._client.delete_repository(
                repositoryName=self._module.params['name']
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="couldn't delete repository")
        return result
def main():
    """
    Module entry point: reconcile the CodeCommit repository and exit with
    the snake-cased result.
    """
    spec = dict(
        name=dict(required=True),
        state=dict(choices=['present', 'absent'], required=True),
        description=dict(default='', aliases=['comment'])
    )
    ansible_aws_module = AnsibleAWSModule(argument_spec=spec, supports_check_mode=True)

    result = CodeCommit(module=ansible_aws_module).process()
    ansible_aws_module.exit_json(**camel_dict_to_snake_dict(result))
if __name__ == '__main__':
main()

@ -1,320 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_codepipeline
short_description: Create or delete AWS CodePipelines
notes:
- for details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codepipeline.html)
description:
- Create or delete a CodePipeline on AWS.
version_added: "2.9"
author:
- Stefan Horning (@stefanhorning) <horning@mediapeers.com>
requirements: [ botocore, boto3 ]
options:
name:
description:
- Name of the pipeline
required: true
type: str
role_arn:
description:
- ARN of the IAM role to use when executing the pipeline
required: true
type: str
artifact_store:
description:
- Location information where artifacts are stored (on S3). Dictionary with fields type and location.
required: true
suboptions:
type:
description:
- Type of the artifacts storage (only 'S3' is currently supported).
type: str
location:
description:
- Bucket name for artifacts.
type: str
type: dict
stages:
description:
- List of stages to perform in the CodePipeline. List of dictionaries containing name and actions for each stage.
required: true
suboptions:
name:
description:
- Name of the stage (step) in the codepipeline
type: str
actions:
description:
- List of action configurations for that stage.
- 'See the boto3 documentation for full documentation of suboptions:'
- 'U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codepipeline.html#CodePipeline.Client.create_pipeline)'
type: list
elements: dict
elements: dict
type: list
version:
description:
- Version number of the pipeline. This number is automatically incremented when a pipeline is updated.
required: false
type: int
state:
description:
- Create or remove code pipeline
default: 'present'
choices: ['present', 'absent']
type: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container)
- aws_codepipeline:
name: my_deploy_pipeline
role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service
artifact_store:
type: S3
location: my_s3_codepipline_bucket
stages:
- name: Get_source
actions:
-
name: Git_pull
actionTypeId:
category: Source
owner: ThirdParty
provider: GitHub
version: '1'
outputArtifacts:
- { name: my-app-source }
configuration:
Owner: mediapeers
Repo: my_gh_repo
PollForSourceChanges: 'true'
Branch: master
# Generate token like this:
# https://docs.aws.amazon.com/codepipeline/latest/userguide/GitHub-rotate-personal-token-CLI.html
# GH Link: https://github.com/settings/tokens
OAuthToken: 'abc123def456'
runOrder: 1
- name: Build
actions:
-
name: CodeBuild
actionTypeId:
category: Build
owner: AWS
provider: CodeBuild
version: '1'
inputArtifacts:
- { name: my-app-source }
outputArtifacts:
- { name: my-app-build }
configuration:
# A project with that name needs to be setup on AWS CodeBuild already (use code_build module).
ProjectName: codebuild-project-name
runOrder: 1
- name: ECS_deploy
actions:
-
name: ECS_deploy
actionTypeId:
category: Deploy
owner: AWS
provider: ECS
version: '1'
inputArtifacts:
- { name: vod-api-app-build }
configuration:
# an ECS cluster with that name needs to be setup on AWS ECS already (use ecs_cluster and ecs_service module)
ClusterName: ecs-cluster-name
ServiceName: ecs-cluster-service-name
FileName: imagedefinitions.json
region: us-east-1
state: present
'''
RETURN = '''
pipeline:
description: Returns the dictionary describing the code pipeline configuration.
returned: success
type: complex
contains:
name:
description: Name of the CodePipeline
returned: always
type: str
sample: my_deploy_pipeline
role_arn:
description: ARN of the IAM role attached to the code pipeline
returned: always
type: str
sample: arn:aws:iam::123123123:role/codepipeline-service-role
artifact_store:
description: Information about where the build artifacts are stored
returned: always
type: complex
contains:
type:
description: The type of the artifacts store, such as S3
returned: always
type: str
sample: S3
location:
description: The location of the artifacts storage (s3 bucket name)
returned: always
type: str
sample: my_s3_codepipline_bucket
encryption_key:
description: The encryption key used to encrypt the artifacts store, such as an AWS KMS key.
returned: when configured
type: str
stages:
description: List of stages configured for this pipeline
returned: always
type: list
version:
description: The version number of the pipeline. This number is auto incremented when pipeline params are changed.
returned: always
type: int
'''
import copy
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
def create_pipeline(client, name, role_arn, artifact_store, stages, version, module):
    """Create a new CodePipeline and return the raw boto3 response.

    Fails the module with a formatted error message (and the decoded AWS
    response, for ClientError) on any botocore error.
    """
    pipeline_dict = {'name': name, 'roleArn': role_arn, 'artifactStore': artifact_store, 'stages': stages}
    # version is optional (AWS manages it); only send it when supplied/truthy
    if version:
        pipeline_dict['version'] = version
    try:
        resp = client.create_pipeline(pipeline=pipeline_dict)
        return resp
    except botocore.exceptions.ClientError as e:
        # fixed: message previously read "Unable create pipeline"
        module.fail_json(msg="Unable to create pipeline {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to create pipeline {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc())
def update_pipeline(client, pipeline_dict, module):
    """Push a full pipeline definition to AWS and return the raw response.

    pipeline_dict must be the complete boto3 pipeline structure (including
    'name'); fails the module on any botocore error.
    """
    try:
        resp = client.update_pipeline(pipeline=pipeline_dict)
        return resp
    except botocore.exceptions.ClientError as e:
        # fixed: message previously read "Unable update pipeline"
        module.fail_json(msg="Unable to update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)),
                         exception=traceback.format_exc())
def delete_pipeline(client, name, module):
    """Delete the named pipeline and return the raw boto3 response.

    Fails the module on any botocore error.
    """
    try:
        resp = client.delete_pipeline(name=name)
        return resp
    except botocore.exceptions.ClientError as e:
        # fixed: message previously read "Unable delete pipeline"
        module.fail_json(msg="Unable to delete pipeline {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to delete pipeline {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc())
def describe_pipeline(client, name, version, module):
    """Fetch the named pipeline (optionally a specific version).

    Returns the boto3 get_pipeline response, or an empty dict when the
    pipeline does not exist; fails the module on other botocore errors.
    """
    pipeline = {}
    try:
        request_kwargs = {'name': name}
        if version is not None:
            request_kwargs['version'] = version
        pipeline = client.get_pipeline(**request_kwargs)
        return pipeline
    except is_boto3_error_code('PipelineNotFoundException'):
        return pipeline
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def main():
    """Module entry point: create, update, or delete the CodePipeline."""
    # Ansible argument schema; mirrors the DOCUMENTATION block above.
    argument_spec = dict(
        name=dict(required=True, type='str'),
        role_arn=dict(required=True, type='str'),
        artifact_store=dict(required=True, type='dict'),
        stages=dict(required=True, type='list'),
        version=dict(type='int'),
        state=dict(choices=['present', 'absent'], default='present')
    )
    module = AnsibleAWSModule(argument_spec=argument_spec)
    client_conn = module.client('codepipeline')
    state = module.params.get('state')
    changed = False
    # Determine if the CodePipeline exists
    found_code_pipeline = describe_pipeline(client=client_conn, name=module.params['name'], version=module.params['version'], module=module)
    pipeline_result = {}
    if state == 'present':
        if 'pipeline' in found_code_pipeline:
            # Pipeline already exists: start from the current definition and
            # overlay the user-supplied fields, then push the update.
            pipeline_dict = copy.deepcopy(found_code_pipeline['pipeline'])
            # Update dictionary with provided module params:
            pipeline_dict['roleArn'] = module.params['role_arn']
            pipeline_dict['artifactStore'] = module.params['artifact_store']
            pipeline_dict['stages'] = module.params['stages']
            if module.params['version'] is not None:
                pipeline_dict['version'] = module.params['version']
            pipeline_result = update_pipeline(client=client_conn, pipeline_dict=pipeline_dict, module=module)
            # NOTE(review): compare_policies is used here as a generic dict
            # diff between the stored and updated pipeline definitions; it is
            # truthy when they differ, which is what drives 'changed'.
            if compare_policies(found_code_pipeline['pipeline'], pipeline_result['pipeline']):
                changed = True
        else:
            # No pipeline with that name yet: create one from the params.
            pipeline_result = create_pipeline(
                client=client_conn,
                name=module.params['name'],
                role_arn=module.params['role_arn'],
                artifact_store=module.params['artifact_store'],
                stages=module.params['stages'],
                version=module.params['version'],
                module=module)
            changed = True
    elif state == 'absent':
        if found_code_pipeline:
            pipeline_result = delete_pipeline(client=client_conn, name=module.params['name'], module=module)
            changed = True
    module.exit_json(changed=changed, **camel_dict_to_snake_dict(pipeline_result))
if __name__ == '__main__':
    main()

@ -1,163 +0,0 @@
#!/usr/bin/python
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_config_aggregation_authorization
short_description: Manage cross-account AWS Config authorizations
description:
- Module manages AWS Config resources.
version_added: "2.6"
requirements: [ 'botocore', 'boto3' ]
author:
- "Aaron Smith (@slapula)"
options:
state:
description:
- Whether the Config rule should be present or absent.
default: present
choices: ['present', 'absent']
type: str
authorized_account_id:
description:
- The 12-digit account ID of the account authorized to aggregate data.
type: str
required: true
authorized_aws_region:
description:
- The region authorized to collect aggregated data.
type: str
required: true
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Get current account ID
aws_caller_info:
register: whoami
- aws_config_aggregation_authorization:
state: present
authorized_account_id: '{{ whoami.account }}'
    authorized_aws_region: us-east-1
'''
RETURN = '''#'''
try:
import botocore
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import AWSRetry
def resource_exists(client, module, params):
    """Return True when an aggregation authorization for the requested
    account already exists, False otherwise.

    Errors from the describe call are deliberately swallowed and reported
    as "does not exist"; the subsequent create/update call will surface
    any real API problem.
    """
    try:
        current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
        authorization_exists = next(
            (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
            None
        )
        # fixed: previously fell off the end and returned None when no
        # authorization matched; make the False case explicit.
        return authorization_exists is not None
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
        return False
def create_resource(client, module, params, result):
    """Create a new aggregation authorization and mark the result changed.

    Fails the module on any botocore error; otherwise returns the mutated
    result dict with changed=True.
    """
    try:
        client.put_aggregation_authorization(
            AuthorizedAccountId=params['AuthorizedAccountId'],
            AuthorizedAwsRegion=params['AuthorizedAwsRegion'],
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
    result['changed'] = True
    return result
def update_resource(client, module, params, result):
    """Re-put the aggregation authorization when the desired params differ
    from what AWS currently stores.

    Returns the result dict (changed=True only when a put was issued);
    fails the module on botocore errors from the put call.
    """
    current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
    current_params = next(
        (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
        None
    )
    # Drop the read-only fields AWS adds so the comparison only covers
    # user-settable values. pop() keeps this safe if a field is absent.
    current_params.pop('AggregationAuthorizationArn', None)
    current_params.pop('CreationTime', None)
    if params != current_params:
        try:
            client.put_aggregation_authorization(
                AuthorizedAccountId=params['AuthorizedAccountId'],
                AuthorizedAwsRegion=params['AuthorizedAwsRegion']
            )
            result['changed'] = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            # fixed: this is the update path, but the message said "create"
            module.fail_json_aws(e, msg="Couldn't update AWS Aggregation authorization")
    # fixed: previously returned None when nothing changed; always return result
    return result
def delete_resource(client, module, params, result):
    """Remove the aggregation authorization for the given account/region.

    Fails the module on any botocore error; otherwise returns the mutated
    result dict with changed=True.
    """
    try:
        client.delete_aggregation_authorization(
            AuthorizedAccountId=params['AuthorizedAccountId'],
            AuthorizedAwsRegion=params['AuthorizedAwsRegion'],
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't delete AWS Aggregation authorization")
    result['changed'] = True
    return result
def main():
    """Entry point: ensure the aggregation authorization matches the request."""
    module = AnsibleAWSModule(
        argument_spec={
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            'authorized_account_id': dict(type='str', required=True),
            'authorized_aws_region': dict(type='str', required=True),
        },
        supports_check_mode=False,
    )
    result = {'changed': False}
    # Map the module's snake_case options onto the boto3 CamelCase keys.
    params = {
        'AuthorizedAccountId': module.params.get('authorized_account_id'),
        'AuthorizedAwsRegion': module.params.get('authorized_aws_region'),
    }
    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
    authorization_present = resource_exists(client, module, params)
    state = module.params.get('state')
    if state == 'present':
        if authorization_present:
            update_resource(client, module, params, result)
        else:
            create_resource(client, module, params, result)
    elif state == 'absent' and authorization_present:
        delete_resource(client, module, params, result)
    module.exit_json(changed=result['changed'])


if __name__ == '__main__':
    main()

@ -1,232 +0,0 @@
#!/usr/bin/python
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_config_aggregator
short_description: Manage AWS Config aggregations across multiple accounts
description:
- Module manages AWS Config resources
version_added: "2.6"
requirements: [ 'botocore', 'boto3' ]
author:
- "Aaron Smith (@slapula)"
options:
name:
description:
- The name of the AWS Config resource.
required: true
type: str
state:
description:
- Whether the Config rule should be present or absent.
default: present
choices: ['present', 'absent']
type: str
account_sources:
description:
- Provides a list of source accounts and regions to be aggregated.
suboptions:
account_ids:
description:
- A list of 12-digit account IDs of accounts being aggregated.
type: list
elements: str
aws_regions:
description:
- A list of source regions being aggregated.
type: list
elements: str
all_aws_regions:
description:
- If true, aggregate existing AWS Config regions and future regions.
type: bool
type: list
elements: dict
required: true
organization_source:
description:
- The region authorized to collect aggregated data.
suboptions:
role_arn:
description:
- ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account.
type: str
aws_regions:
description:
- The source regions being aggregated.
type: list
elements: str
all_aws_regions:
description:
- If true, aggregate existing AWS Config regions and future regions.
type: bool
type: dict
required: true
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create cross-account aggregator
aws_config_aggregator:
name: test_config_rule
state: present
    account_sources:
      - account_ids:
          - 1234567890
          - 0123456789
          - 9012345678
        all_aws_regions: yes
'''
RETURN = '''#'''
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
def resource_exists(client, module, params):
    """Return the existing configuration aggregator dict, or None if absent.

    Fails the module on unexpected botocore errors.
    """
    # fixed: callers build params with the boto3-style key
    # 'ConfigurationAggregatorName' (see main()); the old code read
    # params['name'], which is never set there, and raised KeyError.
    aggregator_name = params.get('ConfigurationAggregatorName') or params.get('name')
    try:
        aggregator = client.describe_configuration_aggregators(
            ConfigurationAggregatorNames=[aggregator_name]
        )
        return aggregator['ConfigurationAggregators'][0]
    except is_boto3_error_code('NoSuchConfigurationAggregatorException'):
        return None
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def create_resource(client, module, params, result):
    """Create the configuration aggregator and report the resulting state.

    Fails the module on any botocore error; otherwise returns the mutated
    result dict with changed=True and the snake_cased aggregator details.
    """
    try:
        client.put_configuration_aggregator(
            ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
            AccountAggregationSources=params['AccountAggregationSources'],
            OrganizationAggregationSource=params['OrganizationAggregationSource'],
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
    result['changed'] = True
    # Re-read the aggregator so the caller sees what AWS actually stored.
    result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
    return result
def update_resource(client, module, params, result):
    """Re-put the configuration aggregator when the desired params differ
    from what AWS currently stores.

    Returns the result dict; fails the module on botocore errors from the
    put call.
    """
    # fixed: the describe call used params['name'], which callers never set
    # (they set 'ConfigurationAggregatorName'); see main().
    response = client.describe_configuration_aggregators(
        ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
    )
    current_params = response['ConfigurationAggregators'][0]
    # fixed: the read-only fields live on the aggregator entry, not on the
    # whole describe response; the old del raised KeyError. pop() also keeps
    # this safe if AWS omits a field.
    for read_only_key in ('ConfigurationAggregatorArn', 'CreationTime', 'LastUpdatedTime'):
        current_params.pop(read_only_key, None)
    if params != current_params:
        try:
            client.put_configuration_aggregator(
                ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
                AccountAggregationSources=params['AccountAggregationSources'],
                OrganizationAggregationSource=params['OrganizationAggregationSource']
            )
            result['changed'] = True
            result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            # fixed: this is the update path, but the message said "create"
            module.fail_json_aws(e, msg="Couldn't update AWS Config configuration aggregator")
    # fixed: previously returned None when nothing changed; always return result
    return result
def delete_resource(client, module, params, result):
    """Delete the configuration aggregator named in params.

    Fails the module on any botocore error; otherwise returns the mutated
    result dict with changed=True.
    """
    try:
        client.delete_configuration_aggregator(
            ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator")
    result['changed'] = True
    return result
def main():
    """Entry point: create, update, or delete the AWS Config aggregator."""
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            'account_sources': dict(type='list', required=True),
            'organization_source': dict(type='dict', required=True)
        },
        supports_check_mode=False,
    )
    result = {
        'changed': False
    }
    name = module.params.get('name')
    state = module.params.get('state')
    # Translate the module's snake_case options into the CamelCase shape
    # expected by the boto3 put_configuration_aggregator API.
    params = {}
    if name:
        params['ConfigurationAggregatorName'] = name
    if module.params.get('account_sources'):
        params['AccountAggregationSources'] = []
        for i in module.params.get('account_sources'):
            tmp_dict = {}
            if i.get('account_ids'):
                tmp_dict['AccountIds'] = i.get('account_ids')
            if i.get('aws_regions'):
                tmp_dict['AwsRegions'] = i.get('aws_regions')
            if i.get('all_aws_regions') is not None:
                tmp_dict['AllAwsRegions'] = i.get('all_aws_regions')
            params['AccountAggregationSources'].append(tmp_dict)
    if module.params.get('organization_source'):
        params['OrganizationAggregationSource'] = {}
        if module.params.get('organization_source').get('role_arn'):
            params['OrganizationAggregationSource'].update({
                'RoleArn': module.params.get('organization_source').get('role_arn')
            })
        if module.params.get('organization_source').get('aws_regions'):
            params['OrganizationAggregationSource'].update({
                'AwsRegions': module.params.get('organization_source').get('aws_regions')
            })
        if module.params.get('organization_source').get('all_aws_regions') is not None:
            # fixed: key was misspelled 'OrganizationAggregationSourcep',
            # which raised KeyError whenever all_aws_regions was supplied
            params['OrganizationAggregationSource'].update({
                'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions')
            })
    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
    resource_status = resource_exists(client, module, params)
    if state == 'present':
        if not resource_status:
            create_resource(client, module, params, result)
        else:
            update_resource(client, module, params, result)
    if state == 'absent':
        if resource_status:
            delete_resource(client, module, params, result)
    module.exit_json(changed=result['changed'])


if __name__ == '__main__':
    main()

@ -1,219 +0,0 @@
#!/usr/bin/python
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_config_delivery_channel
short_description: Manage AWS Config delivery channels
description:
- This module manages AWS Config delivery locations for rule checks and configuration info.
version_added: "2.6"
requirements: [ 'botocore', 'boto3' ]
author:
- "Aaron Smith (@slapula)"
options:
name:
description:
- The name of the AWS Config resource.
required: true
type: str
state:
description:
- Whether the Config rule should be present or absent.
default: present
choices: ['present', 'absent']
type: str
s3_bucket:
description:
- The name of the Amazon S3 bucket to which AWS Config delivers configuration snapshots and configuration history files.
type: str
required: true
s3_prefix:
description:
- The prefix for the specified Amazon S3 bucket.
type: str
sns_topic_arn:
description:
- The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes.
type: str
delivery_frequency:
description:
- The frequency with which AWS Config delivers configuration snapshots.
choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours']
type: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create Delivery Channel for AWS Config
aws_config_delivery_channel:
name: test_delivery_channel
state: present
s3_bucket: 'test_aws_config_bucket'
sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no'
delivery_frequency: 'Twelve_Hours'
'''
RETURN = '''#'''
try:
import botocore
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
# this waits for an IAM role to become fully available, at the cost of
# taking a long time to fail when the IAM role/policy really is invalid
# (InsufficientDeliveryPolicyException is retried in addition to the
# decorator's default error codes)
retry_unavailable_iam_on_put_delivery = AWSRetry.backoff(
    catch_extra_error_codes=['InsufficientDeliveryPolicyException'],
)
def resource_exists(client, module, params):
    """Return the current delivery channel config, or None when absent.

    Fails the module on unexpected botocore errors.
    """
    try:
        response = client.describe_delivery_channels(
            DeliveryChannelNames=[params['name']],
            aws_retry=True,
        )
    except is_boto3_error_code('NoSuchDeliveryChannelException'):
        return None
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
    else:
        return response['DeliveryChannels'][0]
def create_resource(client, module, params, result):
    """Create the delivery channel and report the resulting state.

    Returns the mutated result dict with changed=True and the snake_cased
    channel details; fails the module (with targeted messages for the two
    common S3/IAM misconfigurations) on botocore errors.
    """
    try:
        # The decorator retries InsufficientDeliveryPolicyException while the
        # freshly-created IAM role propagates (see module-level comment).
        retry_unavailable_iam_on_put_delivery(
            client.put_delivery_channel,
        )(
            DeliveryChannel=params,
        )
        result['changed'] = True
        result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
        return result
    except is_boto3_error_code('InvalidS3KeyPrefixException') as e:
        module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix")
    except is_boto3_error_code('InsufficientDeliveryPolicyException') as e:  # pylint: disable=duplicate-except
        # Raised after retries are exhausted; usually a bad bucket or policy.
        module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. "
                                    "Make sure the bucket exists and is available")
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel")
def update_resource(client, module, params, result):
    """Re-put the delivery channel when the desired params differ from what
    AWS currently stores.

    Returns the result dict (changed=True only when a put was issued);
    fails the module with targeted messages on botocore errors.
    """
    current_params = client.describe_delivery_channels(
        DeliveryChannelNames=[params['name']],
        aws_retry=True,
    )
    if params != current_params['DeliveryChannels'][0]:
        try:
            # The decorator retries InsufficientDeliveryPolicyException while
            # IAM changes propagate (see module-level comment).
            retry_unavailable_iam_on_put_delivery(
                client.put_delivery_channel,
            )(
                DeliveryChannel=params,
            )
            result['changed'] = True
            result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
        except is_boto3_error_code('InvalidS3KeyPrefixException') as e:
            module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix")
        except is_boto3_error_code('InsufficientDeliveryPolicyException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. "
                                        "Make sure the bucket exists and is available")
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
            # fixed: this is the update path, but the message said "create"
            module.fail_json_aws(e, msg="Couldn't update AWS Config delivery channel")
    # fixed: previously returned None when nothing changed; always return result
    return result
def delete_resource(client, module, params, result):
    """Delete the delivery channel named in params.

    Fails the module on any botocore error; otherwise returns the mutated
    result dict with changed=True.
    """
    try:
        client.delete_delivery_channel(
            DeliveryChannelName=params['name'],
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't delete AWS Config delivery channel")
    result['changed'] = True
    return result
def main():
    """Entry point: ensure the AWS Config delivery channel matches the request."""
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            's3_bucket': dict(type='str', required=True),
            's3_prefix': dict(type='str'),
            'sns_topic_arn': dict(type='str'),
            'delivery_frequency': dict(
                type='str',
                choices=[
                    'One_Hour',
                    'Three_Hours',
                    'Six_Hours',
                    'Twelve_Hours',
                    'TwentyFour_Hours'
                ]
            ),
        },
        supports_check_mode=False,
    )
    result = {
        'changed': False
    }
    name = module.params.get('name')
    state = module.params.get('state')
    # Translate the module's snake_case options into the camelCase shape
    # expected by the boto3 put_delivery_channel API.
    params = {}
    if name:
        params['name'] = name
    if module.params.get('s3_bucket'):
        params['s3BucketName'] = module.params.get('s3_bucket')
    if module.params.get('s3_prefix'):
        params['s3KeyPrefix'] = module.params.get('s3_prefix')
    if module.params.get('sns_topic_arn'):
        params['snsTopicARN'] = module.params.get('sns_topic_arn')
    if module.params.get('delivery_frequency'):
        params['configSnapshotDeliveryProperties'] = {
            'deliveryFrequency': module.params.get('delivery_frequency')
        }
    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
    resource_status = resource_exists(client, module, params)
    if state == 'present':
        if not resource_status:
            create_resource(client, module, params, result)
        # NOTE(review): resource_status reflects the pre-create state, so the
        # update branch runs only when the channel already existed; a fresh
        # create is not immediately followed by an update.
        if resource_status:
            update_resource(client, module, params, result)
    if state == 'absent':
        if resource_status:
            delete_resource(client, module, params, result)
    module.exit_json(**result)
if __name__ == '__main__':
    main()

@ -1,213 +0,0 @@
#!/usr/bin/python
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_config_recorder
short_description: Manage AWS Config Recorders
description:
- Module manages AWS Config configuration recorder settings.
version_added: "2.6"
requirements: [ 'botocore', 'boto3' ]
author:
- "Aaron Smith (@slapula)"
options:
name:
description:
- The name of the AWS Config resource.
required: true
type: str
state:
description:
- Whether the Config rule should be present or absent.
default: present
choices: ['present', 'absent']
type: str
role_arn:
description:
- Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account.
- Required when I(state=present).
type: str
recording_group:
description:
- Specifies the types of AWS resources for which AWS Config records configuration changes.
- Required when I(state=present)
suboptions:
all_supported:
description:
- Specifies whether AWS Config records configuration changes for every supported type of regional resource.
- If I(all_supported=true), when AWS Config adds support for a new type of regional resource, it starts
recording resources of that type automatically.
- If I(all_supported=true), you cannot enumerate a list of I(resource_types).
include_global_types:
description:
- Specifies whether AWS Config includes all supported types of global resources (for example, IAM resources)
with the resources that it records.
- The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items,
you should consider customizing AWS Config in only one region to record global resources.
- If you set I(include_global_types=true), you must also set I(all_supported=true).
- If you set I(include_global_types=true), when AWS Config adds support for a new type of global resource, it starts recording
resources of that type automatically.
resource_types:
description:
- A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example,
C(AWS::EC2::Instance) or C(AWS::CloudTrail::Trail)).
- Before you can set this option, you must set I(all_supported=false).
type: dict
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create Configuration Recorder for AWS Config
aws_config_recorder:
name: test_configuration_recorder
state: present
role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder'
recording_group:
all_supported: true
include_global_types: true
'''
RETURN = '''#'''
try:
import botocore
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
def resource_exists(client, module, params):
try:
recorder = client.describe_configuration_recorders(
ConfigurationRecorderNames=[params['name']]
)
return recorder['ConfigurationRecorders'][0]
except is_boto3_error_code('NoSuchConfigurationRecorderException'):
return
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
def create_resource(client, module, params, result):
try:
response = client.put_configuration_recorder(
ConfigurationRecorder=params
)
result['changed'] = True
result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder")
def update_resource(client, module, params, result):
current_params = client.describe_configuration_recorders(
ConfigurationRecorderNames=[params['name']]
)
if params != current_params['ConfigurationRecorders'][0]:
try:
response = client.put_configuration_recorder(
ConfigurationRecorder=params
)
result['changed'] = True
result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder")
def delete_resource(client, module, params, result):
try:
response = client.delete_configuration_recorder(
ConfigurationRecorderName=params['name']
)
result['changed'] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder")
def main():
    """Entry point: manage the AWS Config configuration recorder.

    Builds the camelCase API payload from snake_case module options, then
    creates, updates, or deletes the recorder according to `state`.
    """
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            'role_arn': dict(type='str'),
            'recording_group': dict(type='dict'),
        },
        supports_check_mode=False,
        required_if=[
            ('state', 'present', ['role_arn', 'recording_group']),
        ],
    )
    result = {
        'changed': False
    }
    name = module.params.get('name')
    state = module.params.get('state')
    # Translate module options into the shape put_configuration_recorder expects.
    params = {}
    if name:
        params['name'] = name
    if module.params.get('role_arn'):
        params['roleARN'] = module.params.get('role_arn')
    if module.params.get('recording_group'):
        params['recordingGroup'] = {}
        if module.params.get('recording_group').get('all_supported') is not None:
            params['recordingGroup'].update({
                'allSupported': module.params.get('recording_group').get('all_supported')
            })
        if module.params.get('recording_group').get('include_global_types') is not None:
            params['recordingGroup'].update({
                'includeGlobalResourceTypes': module.params.get('recording_group').get('include_global_types')
            })
        if module.params.get('recording_group').get('resource_types'):
            params['recordingGroup'].update({
                'resourceTypes': module.params.get('recording_group').get('resource_types')
            })
        else:
            # Explicit empty list: AWS requires the key when recordingGroup is sent.
            params['recordingGroup'].update({
                'resourceTypes': []
            })
    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
    resource_status = resource_exists(client, module, params)
    if state == 'present':
        if not resource_status:
            create_resource(client, module, params, result)
        if resource_status:
            update_resource(client, module, params, result)
    if state == 'absent':
        if resource_status:
            delete_resource(client, module, params, result)
    # Fix: expose the recorder details gathered by create/update instead of
    # discarding everything but 'changed' (matches the sibling aws_config_rule).
    module.exit_json(**result)
if __name__ == '__main__':
main()

@ -1,275 +0,0 @@
#!/usr/bin/python
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_config_rule
short_description: Manage AWS Config resources
description:
- Module manages AWS Config rules
version_added: "2.6"
requirements: [ 'botocore', 'boto3' ]
author:
- "Aaron Smith (@slapula)"
options:
name:
description:
- The name of the AWS Config resource.
required: true
type: str
state:
description:
- Whether the Config rule should be present or absent.
default: present
choices: ['present', 'absent']
type: str
description:
description:
- The description that you provide for the AWS Config rule.
type: str
scope:
description:
- Defines which resources can trigger an evaluation for the rule.
suboptions:
compliance_types:
description:
- The resource types of only those AWS resources that you want to trigger an evaluation for the rule.
You can only specify one type if you also specify a resource ID for I(compliance_id).
compliance_id:
description:
- The ID of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID,
you must specify one resource type for I(compliance_types).
tag_key:
description:
- The tag key that is applied to only those AWS resources that you want to trigger an evaluation for the rule.
tag_value:
description:
- The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule.
If you specify a value for I(tag_value), you must also specify a value for I(tag_key).
type: dict
source:
description:
- Provides the rule owner (AWS or customer), the rule identifier, and the notifications that cause the function to
evaluate your AWS resources.
suboptions:
owner:
description:
- The resource types of only those AWS resources that you want to trigger an evaluation for the rule.
You can only specify one type if you also specify a resource ID for I(compliance_id).
identifier:
description:
- The ID of the only AWS resource that you want to trigger an evaluation for the rule.
If you specify a resource ID, you must specify one resource type for I(compliance_types).
details:
description:
- Provides the source and type of the event that causes AWS Config to evaluate your AWS resources.
- This parameter expects a list of dictionaries. Each dictionary expects the following key/value pairs.
- Key `EventSource` The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources.
- Key `MessageType` The type of notification that triggers AWS Config to run an evaluation for a rule.
- Key `MaximumExecutionFrequency` The frequency at which you want AWS Config to run evaluations for a custom rule with a periodic trigger.
type: dict
required: true
input_parameters:
description:
- A string, in JSON format, that is passed to the AWS Config rule Lambda function.
type: str
execution_frequency:
description:
- The maximum frequency with which AWS Config runs evaluations for a rule.
choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours']
type: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create Config Rule for AWS Config
aws_config_rule:
name: test_config_rule
state: present
description: 'This AWS Config rule checks for public write access on S3 buckets'
scope:
compliance_types:
- 'AWS::S3::Bucket'
source:
owner: AWS
identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED'
'''
RETURN = '''#'''
try:
import botocore
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
def rule_exists(client, module, params):
    """Return the existing Config rule named params['ConfigRuleName'], or None.

    NoSuchConfigRuleException (matched via is_boto3_error_code) means the rule
    is absent; any other botocore error fails the module.
    """
    try:
        rule = client.describe_config_rules(
            ConfigRuleNames=[params['ConfigRuleName']],
            aws_retry=True,
        )
        return rule['ConfigRules'][0]
    except is_boto3_error_code('NoSuchConfigRuleException'):
        # Rule absent: implicit None lets callers branch on truthiness.
        return
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def create_resource(client, module, params, result):
    """Create a new AWS Config rule from params and flag the result as changed."""
    try:
        client.put_config_rule(ConfigRule=params)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't create AWS Config rule")
    result['changed'] = True
    return result
def update_resource(client, module, params, result):
    """Update the AWS Config rule when the desired params differ from AWS's view.

    The ARN and ID that AWS reports are stripped before comparing, because the
    desired params never contain them. Returns result on change, None otherwise.
    """
    current_params = client.describe_config_rules(
        ConfigRuleNames=[params['ConfigRuleName']],
        aws_retry=True,
    )
    del current_params['ConfigRules'][0]['ConfigRuleArn']
    del current_params['ConfigRules'][0]['ConfigRuleId']
    if params != current_params['ConfigRules'][0]:
        try:
            client.put_config_rule(
                ConfigRule=params
            )
            result['changed'] = True
            result['rule'] = camel_dict_to_snake_dict(rule_exists(client, module, params))
            return result
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            # Fixed message: this is the update path, not create.
            module.fail_json_aws(e, msg="Couldn't update AWS Config rule")
def delete_resource(client, module, params, result):
    """Delete the AWS Config rule; marks result changed and empties result['rule']."""
    try:
        # Return payload is not used; the unused `response` local has been dropped.
        client.delete_config_rule(
            ConfigRuleName=params['ConfigRuleName'],
            aws_retry=True,
        )
        result['changed'] = True
        result['rule'] = {}
        return result
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't delete AWS Config rule")
def main():
    """Entry point: build the ConfigRule request from module params and apply state."""
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            'description': dict(type='str'),
            'scope': dict(type='dict'),
            'source': dict(type='dict', required=True),
            'input_parameters': dict(type='str'),
            'execution_frequency': dict(
                type='str',
                choices=[
                    'One_Hour',
                    'Three_Hours',
                    'Six_Hours',
                    'Twelve_Hours',
                    'TwentyFour_Hours'
                ]
            ),
        },
        supports_check_mode=False,
    )
    result = {
        'changed': False
    }
    name = module.params.get('name')
    state = module.params.get('state')
    # (Removed a dead read of a nonexistent 'resource_type' module parameter.)
    # Translate module options into the ConfigRule shape put_config_rule expects.
    params = {}
    if name:
        params['ConfigRuleName'] = name
    if module.params.get('description'):
        params['Description'] = module.params.get('description')
    if module.params.get('scope'):
        params['Scope'] = {}
        if module.params.get('scope').get('compliance_types'):
            params['Scope'].update({
                'ComplianceResourceTypes': module.params.get('scope').get('compliance_types')
            })
        if module.params.get('scope').get('tag_key'):
            params['Scope'].update({
                'TagKey': module.params.get('scope').get('tag_key')
            })
        if module.params.get('scope').get('tag_value'):
            params['Scope'].update({
                'TagValue': module.params.get('scope').get('tag_value')
            })
        if module.params.get('scope').get('compliance_id'):
            params['Scope'].update({
                'ComplianceResourceId': module.params.get('scope').get('compliance_id')
            })
    if module.params.get('source'):
        params['Source'] = {}
        if module.params.get('source').get('owner'):
            params['Source'].update({
                'Owner': module.params.get('source').get('owner')
            })
        if module.params.get('source').get('identifier'):
            params['Source'].update({
                'SourceIdentifier': module.params.get('source').get('identifier')
            })
        if module.params.get('source').get('details'):
            params['Source'].update({
                'SourceDetails': module.params.get('source').get('details')
            })
    if module.params.get('input_parameters'):
        params['InputParameters'] = module.params.get('input_parameters')
    if module.params.get('execution_frequency'):
        params['MaximumExecutionFrequency'] = module.params.get('execution_frequency')
    params['ConfigRuleState'] = 'ACTIVE'
    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
    existing_rule = rule_exists(client, module, params)
    if state == 'present':
        if not existing_rule:
            create_resource(client, module, params, result)
        else:
            update_resource(client, module, params, result)
    if state == 'absent':
        if existing_rule:
            delete_resource(client, module, params, result)
    module.exit_json(**result)
if __name__ == '__main__':
main()

@ -1,343 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aws_direct_connect_connection
short_description: Creates, deletes, modifies a DirectConnect connection
description:
- Create, update, or delete a Direct Connect connection between a network and a specific AWS Direct Connect location.
Upon creation the connection may be added to a link aggregation group or established as a standalone connection.
The connection may later be associated or disassociated with a link aggregation group.
version_added: "2.4"
author: "Sloane Hertel (@s-hertel)"
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
- botocore
options:
state:
description:
- The state of the Direct Connect connection.
choices:
- present
- absent
type: str
required: true
name:
description:
- The name of the Direct Connect connection. This is required to create a
new connection.
- One of I(connection_id) or I(name) must be specified.
type: str
connection_id:
description:
- The ID of the Direct Connect connection.
- Modifying attributes of a connection with I(forced_update) will result in a new Direct Connect connection ID.
- One of I(connection_id) or I(name) must be specified.
type: str
location:
description:
- Where the Direct Connect connection is located.
- Required when I(state=present).
type: str
bandwidth:
description:
- The bandwidth of the Direct Connect connection.
- Required when I(state=present).
choices:
- 1Gbps
- 10Gbps
type: str
link_aggregation_group:
description:
- The ID of the link aggregation group you want to associate with the connection.
- This is optional when a stand-alone connection is desired.
type: str
forced_update:
description:
- To modify bandwidth or location the connection will need to be deleted and recreated.
By default this will not happen - this option must be set to True.
type: bool
"""
EXAMPLES = """
# create a Direct Connect connection
- aws_direct_connect_connection:
name: ansible-test-connection
state: present
location: EqDC2
link_aggregation_group: dxlag-xxxxxxxx
bandwidth: 1Gbps
register: dc
# disassociate the LAG from the connection
- aws_direct_connect_connection:
state: present
connection_id: dc.connection.connection_id
location: EqDC2
bandwidth: 1Gbps
# replace the connection with one with more bandwidth
- aws_direct_connect_connection:
state: present
name: ansible-test-connection
location: EqDC2
bandwidth: 10Gbps
forced_update: True
# delete the connection
- aws_direct_connect_connection:
state: absent
name: ansible-test-connection
"""
RETURN = """
connection:
description: The attributes of the direct connect connection.
type: complex
returned: I(state=present)
contains:
aws_device:
description: The endpoint which the physical connection terminates on.
returned: when the requested state is no longer 'requested'
type: str
sample: EqDC2-12pmo7hemtz1z
bandwidth:
description: The bandwidth of the connection.
returned: always
type: str
sample: 1Gbps
connection_id:
description: The ID of the connection.
returned: always
type: str
sample: dxcon-ffy9ywed
connection_name:
description: The name of the connection.
returned: always
type: str
sample: ansible-test-connection
connection_state:
description: The state of the connection.
returned: always
type: str
sample: pending
loa_issue_time:
description: The issue time of the connection's Letter of Authorization - Connecting Facility Assignment.
returned: when the LOA-CFA has been issued (the connection state will no longer be 'requested')
type: str
sample: '2018-03-20T17:36:26-04:00'
location:
description: The location of the connection.
returned: always
type: str
sample: EqDC2
owner_account:
description: The account that owns the direct connect connection.
returned: always
type: str
sample: '123456789012'
region:
description: The region in which the connection exists.
returned: always
type: str
sample: us-east-1
"""
import traceback
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry)
from ansible.module_utils.aws.direct_connect import (DirectConnectError, delete_connection,
associate_connection_and_lag, disassociate_connection_and_lag)
try:
from botocore.exceptions import BotoCoreError, ClientError
except Exception:
pass
# handled by imported AnsibleAWSModule
retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]}
def connection_status(client, connection_id):
    # Convenience wrapper: fetch the current state of a known connection without
    # existence verification; returns {'connection': {...}} (possibly empty).
    return connection_exists(client, connection_id=connection_id, connection_name=None, verify=False)
def connection_exists(client, connection_id=None, connection_name=None, verify=True):
    """Find a Direct Connect connection by ID or name.

    With verify=True, returns the matching connection ID when exactly one
    non-deleted match is found, otherwise False. With verify=False, returns
    {'connection': <dict>} with the connection attributes ({} when not found).
    Raises DirectConnectError when the describe call fails.
    """
    params = {}
    if connection_id:
        params['connectionId'] = connection_id
    try:
        response = AWSRetry.backoff(**retry_params)(client.describe_connections)(**params)
    except (BotoCoreError, ClientError) as e:
        if connection_id:
            msg = "Failed to describe DirectConnect ID {0}".format(connection_id)
        else:
            msg = "Failed to describe DirectConnect connections"
        raise DirectConnectError(msg=msg,
                                 last_traceback=traceback.format_exc(),
                                 exception=e)
    match = []
    connection = []
    # look for matching connections
    if len(response.get('connections', [])) == 1 and connection_id:
        # Single result from an ID-scoped describe: count it unless deleted.
        if response['connections'][0]['connectionState'] != 'deleted':
            match.append(response['connections'][0]['connectionId'])
            connection.extend(response['connections'])
    for conn in response.get('connections', []):
        # Name-based matching across all returned connections (skips deleted ones).
        if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted':
            match.append(conn['connectionId'])
            connection.append(conn)
    # verifying if the connections exists; if true, return connection identifier, otherwise return False
    if verify and len(match) == 1:
        return match[0]
    elif verify:
        return False
    # not verifying if the connection exists; just return current connection info
    elif len(connection) == 1:
        return {'connection': connection[0]}
    return {'connection': {}}
def create_connection(client, location, bandwidth, name, lag_id):
    """Create a new Direct Connect connection (optionally inside a LAG).

    Returns the new connection's ID. Raises DirectConnectError when no name is
    given or the AWS call fails.
    """
    if not name:
        raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.")
    params = {
        'location': location,
        'bandwidth': bandwidth,
        'connectionName': name,
    }
    if lag_id:
        params['lagId'] = lag_id
    try:
        connection = AWSRetry.backoff(**retry_params)(client.create_connection)(**params)
    except (BotoCoreError, ClientError) as e:
        raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name),
                                 last_traceback=traceback.format_exc(),
                                 exception=e)
    return connection['connectionId']
def changed_properties(current_status, location, bandwidth):
    """Return True when the connection's bandwidth or location differs from the desired values."""
    unchanged = (current_status['bandwidth'] == bandwidth
                 and current_status['location'] == location)
    return not unchanged
@AWSRetry.backoff(**retry_params)
def update_associations(client, latest_state, connection_id, lag_id):
    """Reconcile the connection's LAG membership with the requested lag_id.

    Disassociates from the current LAG when it differs from lag_id, then
    associates with lag_id when one is requested and not already in place.
    Returns True if any association changed.
    """
    changed = False
    if 'lagId' in latest_state and lag_id != latest_state['lagId']:
        disassociate_connection_and_lag(client, connection_id, lag_id=latest_state['lagId'])
        changed = True
    # Associate either after a disassociation above, or when the connection was standalone.
    if (changed and lag_id) or (lag_id and 'lagId' not in latest_state):
        associate_connection_and_lag(client, connection_id, lag_id)
        changed = True
    return changed
def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update):
    """Create or update a Direct Connect connection; returns (changed, connection_id).

    When the connection exists but its location/bandwidth differ, it is deleted
    and recreated only if forced_update is set (this yields a NEW connection ID,
    via the recursive call). Otherwise only LAG associations are reconciled.
    """
    # the connection is found; get the latest state and see if it needs to be updated
    if connection_id:
        latest_state = connection_status(client, connection_id=connection_id)['connection']
        if changed_properties(latest_state, location, bandwidth) and forced_update:
            ensure_absent(client, connection_id)
            return ensure_present(client=client,
                                  connection_id=None,
                                  connection_name=connection_name,
                                  location=location,
                                  bandwidth=bandwidth,
                                  lag_id=lag_id,
                                  forced_update=forced_update)
        elif update_associations(client, latest_state, connection_id, lag_id):
            return True, connection_id
    # no connection found; create a new one
    else:
        return True, create_connection(client, location, bandwidth, connection_name, lag_id)
    return False, connection_id
@AWSRetry.backoff(**retry_params)
def ensure_absent(client, connection_id):
    """Delete the connection when an ID is known; returns True if a delete happened."""
    changed = False
    if connection_id:
        delete_connection(client, connection_id)
        changed = True
    return changed
def main():
    """Entry point: create, update, or delete a Direct Connect connection."""
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(),
        location=dict(),
        bandwidth=dict(choices=['1Gbps', '10Gbps']),
        link_aggregation_group=dict(),
        connection_id=dict(),
        forced_update=dict(type='bool', default=False)
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_one_of=[('connection_id', 'name')],
        required_if=[('state', 'present', ('location', 'bandwidth'))]
    )
    connection = module.client('directconnect')
    state = module.params.get('state')
    try:
        # Resolve the connection by ID or name; connection_exists returns False when not found.
        connection_id = connection_exists(
            connection,
            connection_id=module.params.get('connection_id'),
            connection_name=module.params.get('name')
        )
        if not connection_id and module.params.get('connection_id'):
            module.fail_json(msg="The Direct Connect connection {0} does not exist.".format(module.params.get('connection_id')))
        if state == 'present':
            changed, connection_id = ensure_present(connection,
                                                    connection_id=connection_id,
                                                    connection_name=module.params.get('name'),
                                                    location=module.params.get('location'),
                                                    bandwidth=module.params.get('bandwidth'),
                                                    lag_id=module.params.get('link_aggregation_group'),
                                                    forced_update=module.params.get('forced_update'))
            response = connection_status(connection, connection_id)
        elif state == 'absent':
            changed = ensure_absent(connection, connection_id)
            response = {}
    except DirectConnectError as e:
        # Preserve the AWS error payload when one is attached to the exception.
        if e.last_traceback:
            module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response))
        else:
            module.fail_json(msg=e.msg)
    module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
if __name__ == '__main__':
main()

@ -1,374 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_direct_connect_gateway
author: Gobin Sougrakpam (@gobins)
version_added: "2.5"
short_description: Manage AWS Direct Connect gateway
description:
- Creates AWS Direct Connect Gateway.
- Deletes AWS Direct Connect Gateway.
- Attaches Virtual Gateways to Direct Connect Gateway.
- Detaches Virtual Gateways to Direct Connect Gateway.
extends_documentation_fragment:
- aws
- ec2
requirements: [ boto3 ]
options:
state:
description:
- Set I(state=present) to ensure a resource is created.
- Set I(state=absent) to remove a resource.
default: present
choices: [ "present", "absent"]
type: str
name:
description:
- Name of the Direct Connect Gateway to be created or deleted.
type: str
amazon_asn:
description:
- The Amazon side ASN.
- Required when I(state=present).
type: str
direct_connect_gateway_id:
description:
- The ID of an existing Direct Connect Gateway.
- Required when I(state=absent).
type: str
virtual_gateway_id:
description:
- The VPN gateway ID of an existing virtual gateway.
type: str
wait_timeout:
description:
- How long to wait for the association to be deleted.
type: int
default: 320
'''
EXAMPLES = '''
- name: Create a new direct connect gateway attached to virtual private gateway
dxgw:
state: present
name: my-dx-gateway
amazon_asn: 7224
virtual_gateway_id: vpg-12345
register: created_dxgw
- name: Create a new unattached dxgw
dxgw:
state: present
name: my-dx-gateway
amazon_asn: 7224
register: created_dxgw
'''
RETURN = '''
result:
description:
- The attributes of the Direct Connect Gateway
type: complex
returned: I(state=present)
contains:
amazon_side_asn:
description: ASN on the amazon side.
type: str
direct_connect_gateway_id:
description: The ID of the direct connect gateway.
type: str
direct_connect_gateway_name:
description: The name of the direct connect gateway.
type: str
direct_connect_gateway_state:
description: The state of the direct connect gateway.
type: str
owner_account:
description: The AWS account ID of the owner of the direct connect gateway.
type: str
'''
import time
import traceback
try:
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, ec2_argument_spec,
get_aws_connection_info, boto3_conn)
from ansible.module_utils._text import to_native
def dx_gateway_info(client, gateway_id, module):
    """Describe a single Direct Connect gateway.

    Returns the gateway dict, or None (implicitly) when AWS returns no gateways.
    """
    try:
        resp = client.describe_direct_connect_gateways(
            directConnectGatewayId=gateway_id)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    if resp['directConnectGateways']:
        return resp['directConnectGateways'][0]
def wait_for_status(client, module, gateway_id, virtual_gateway_id, status):
    """Poll the gateway/VGW association until it reaches the given status.

    Polls up to max_retries times, sleeping polling_increment_secs between
    attempts. An empty association list counts as success (nothing to wait on).
    Returns (status_achieved, last_response).
    """
    polling_increment_secs = 15
    max_retries = 3
    status_achieved = False
    for x in range(0, max_retries):
        try:
            response = check_dxgw_association(
                client,
                module,
                gateway_id=gateway_id,
                virtual_gateway_id=virtual_gateway_id)
            if response['directConnectGatewayAssociations']:
                # Only the first association is inspected.
                if response['directConnectGatewayAssociations'][0]['associationState'] == status:
                    status_achieved = True
                    break
                else:
                    time.sleep(polling_increment_secs)
            else:
                status_achieved = True
                break
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    result = response
    return status_achieved, result
def associate_direct_connect_gateway(client, module, gateway_id):
    """Attach the module's virtual_gateway_id to the gateway and wait for 'associating'.

    Fails the module when the association API call errors or the state is not
    reached in time. Returns the raw create-association response.
    """
    params = dict()
    params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
    try:
        response = client.create_direct_connect_gateway_association(
            directConnectGatewayId=gateway_id,
            virtualGatewayId=params['virtual_gateway_id'])
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    status_achieved, dxgw = wait_for_status(client, module, gateway_id, params['virtual_gateway_id'], 'associating')
    if not status_achieved:
        module.fail_json(msg='Error waiting for dxgw to attach to vpg - please check the AWS console')
    result = response
    return result
def delete_association(client, module, gateway_id, virtual_gateway_id):
    """Remove the association between a Direct Connect gateway and a virtual gateway.

    Waits for the association to enter 'disassociating'; fails the module on
    AWS errors or when the state is not reached. Returns the raw API response.
    """
    try:
        resp = client.delete_direct_connect_gateway_association(
            directConnectGatewayId=gateway_id,
            virtualGatewayId=virtual_gateway_id)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    reached, _assoc = wait_for_status(client, module, gateway_id, virtual_gateway_id, 'disassociating')
    if not reached:
        module.fail_json(msg='Error waiting for dxgw to detach from vpg - please check the AWS console')
    return resp
def create_dx_gateway(client, module):
    """Create a new Direct Connect gateway from the module's name/amazon_asn params."""
    gateway_name = module.params.get('name')
    amazon_asn = module.params.get('amazon_asn')
    try:
        # amazonSideAsn must be an integer; the module option arrives as a string.
        return client.create_direct_connect_gateway(
            directConnectGatewayName=gateway_name,
            amazonSideAsn=int(amazon_asn))
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def find_dx_gateway(client, module, gateway_id=None):
    """Return the first gateway whose name matches the module's 'name' param, or None.

    Paginates describe_direct_connect_gateways via nextToken; when gateway_id
    is given the describe call is scoped to that single gateway.
    """
    params = dict()
    if gateway_id is not None:
        params['directConnectGatewayId'] = gateway_id
    gateways = list()
    while True:
        try:
            resp = client.describe_direct_connect_gateways(**params)
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            module.fail_json(msg=to_native(e), exception=traceback.format_exc())
        gateways.extend(resp['directConnectGateways'])
        if 'nextToken' in resp:
            params['nextToken'] = resp['nextToken']
        else:
            break
    # First name match wins (the original kept a `count` that was never used).
    for gateway in gateways:
        if module.params.get('name') == gateway['directConnectGatewayName']:
            return gateway
    return None
def check_dxgw_association(client, module, gateway_id, virtual_gateway_id=None):
    """Describe associations for a gateway, optionally scoped to one virtual gateway."""
    query = {'directConnectGatewayId': gateway_id}
    if virtual_gateway_id is not None:
        query['virtualGatewayId'] = virtual_gateway_id
    try:
        return client.describe_direct_connect_gateway_associations(**query)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def ensure_present(client, module):
    """Create the Direct Connect gateway if needed and reconcile its VGW attachment.

    Returns (changed, gateway_info). When virtual_gateway_id is supplied the
    gateway is attached to it; when omitted, any live attachments are removed.
    """
    changed = False
    params = dict()
    result = dict()
    params['name'] = module.params.get('name')
    params['amazon_asn'] = module.params.get('amazon_asn')
    params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
    # check if a gateway matching our module args already exists
    existing_dxgw = find_dx_gateway(client, module)
    if existing_dxgw is not None and existing_dxgw['directConnectGatewayState'] != 'deleted':
        gateway_id = existing_dxgw['directConnectGatewayId']
        # if a virtual_gateway_id was provided, check whether it is attached to the DXGW
        if params['virtual_gateway_id']:
            resp = check_dxgw_association(
                client,
                module,
                gateway_id=gateway_id,
                virtual_gateway_id=params['virtual_gateway_id'])
            if not resp["directConnectGatewayAssociations"]:
                # attach the dxgw to the supplied virtual_gateway_id
                associate_direct_connect_gateway(client, module, gateway_id)
                changed = True
        # if params['virtual_gateway_id'] is not provided, check the dxgw is attached to a VPG. If so, detach it.
        else:
            # (Removed a redundant second find_dx_gateway call here.)
            resp = check_dxgw_association(client, module, gateway_id=gateway_id)
            if resp["directConnectGatewayAssociations"]:
                for association in resp['directConnectGatewayAssociations']:
                    if association['associationState'] not in ['disassociating', 'disassociated']:
                        delete_association(
                            client,
                            module,
                            gateway_id=gateway_id,
                            virtual_gateway_id=association['virtualGatewayId'])
                        # Bug fix: detaching an association is a real change.
                        changed = True
    else:
        # create a new dxgw
        new_dxgw = create_dx_gateway(client, module)
        changed = True
        gateway_id = new_dxgw['directConnectGateway']['directConnectGatewayId']
        # if a virtual gateway was supplied, attempt to attach it to the new dxgw
        if params['virtual_gateway_id']:
            associate_direct_connect_gateway(client, module, gateway_id)
            # (Removed a redundant describe that only re-set the already-True
            # changed flag.)
    result = dx_gateway_info(client, gateway_id, module)
    return changed, result
def ensure_absent(client, module):
    """Delete the Direct Connect gateway, detaching any VGW associations first.

    Waits up to wait_timeout seconds for associations to disappear before
    deleting. Returns True when the gateway was deleted, False when it did
    not exist.
    """
    changed = False
    dx_gateway_id = module.params.get('direct_connect_gateway_id')
    existing_dxgw = find_dx_gateway(client, module, dx_gateway_id)
    if existing_dxgw is not None:
        resp = check_dxgw_association(client, module,
                                      gateway_id=dx_gateway_id)
        if resp["directConnectGatewayAssociations"]:
            for association in resp['directConnectGatewayAssociations']:
                if association['associationState'] not in ['disassociating', 'disassociated']:
                    delete_association(client, module,
                                       gateway_id=dx_gateway_id,
                                       virtual_gateway_id=association['virtualGatewayId'])
        # wait for deleting association
        timeout = time.time() + module.params.get('wait_timeout')
        while time.time() < timeout:
            resp = check_dxgw_association(client,
                                          module,
                                          gateway_id=dx_gateway_id)
            if resp["directConnectGatewayAssociations"] != []:
                time.sleep(15)
            else:
                break
        try:
            # Return payload was previously stored in an unused `result` local.
            client.delete_direct_connect_gateway(
                directConnectGatewayId=dx_gateway_id
            )
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            module.fail_json(msg=to_native(e), exception=traceback.format_exc())
        # Bug fix: report the deletion as a change instead of always returning False.
        changed = True
    return changed
def main():
    """Entry point: create/delete a Direct Connect gateway and manage VGW attachment."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(state=dict(default='present', choices=['present', 'absent']),
                              name=dict(),
                              amazon_asn=dict(),
                              virtual_gateway_id=dict(),
                              direct_connect_gateway_id=dict(),
                              wait_timeout=dict(type='int', default=320)))
    required_if = [('state', 'present', ['name', 'amazon_asn']),
                   ('state', 'absent', ['direct_connect_gateway_id'])]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')
    state = module.params.get('state')
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='directconnect', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    if state == 'present':
        (changed, results) = ensure_present(client, module)
    elif state == 'absent':
        changed = ensure_absent(client, module)
        results = {}
    module.exit_json(changed=changed, **camel_dict_to_snake_dict(results))
if __name__ == '__main__':
main()

@ -1,470 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aws_direct_connect_link_aggregation_group
short_description: Manage Direct Connect LAG bundles
description:
- Create, delete, or modify a Direct Connect link aggregation group.
version_added: "2.4"
author: "Sloane Hertel (@s-hertel)"
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
- botocore
options:
state:
description:
- The state of the Direct Connect link aggregation group.
choices:
- present
- absent
type: str
required: true
name:
description:
- The name of the Direct Connect link aggregation group.
type: str
link_aggregation_group_id:
description:
- The ID of the Direct Connect link aggregation group.
type: str
num_connections:
description:
- The number of connections with which to initialize the link aggregation group.
type: int
min_links:
description:
- The minimum number of physical connections that must be operational for the LAG itself to be operational.
type: int
location:
description:
- The location of the link aggregation group.
type: str
bandwidth:
description:
- The bandwidth of the link aggregation group.
type: str
force_delete:
description:
- This allows the minimum number of links to be set to 0, any hosted connections disassociated,
and any virtual interfaces associated to the LAG deleted.
type: bool
connection_id:
description:
- A connection ID to link with the link aggregation group upon creation.
type: str
delete_with_disassociation:
description:
- To be used with I(state=absent) to delete connections after disassociating them with the LAG.
type: bool
wait:
description:
- Whether or not to wait for the operation to complete.
- May be useful when waiting for virtual interfaces to be deleted.
- The time to wait can be controlled by setting I(wait_timeout).
type: bool
wait_timeout:
description:
- The duration in seconds to wait if I(wait=true).
default: 120
type: int
"""
EXAMPLES = """
# create a Direct Connect link aggregation group
- aws_direct_connect_link_aggregation_group:
    state: present
    location: EqDC2
    link_aggregation_group_id: dxlag-xxxxxxxx
    bandwidth: 1Gbps
"""
RETURN = """
changed:
  type: bool
  description: Whether or not the LAG has changed.
  returned: always
aws_device:
type: str
description: The AWS Direct Connection endpoint that hosts the LAG.
sample: "EqSe2-1bwfvazist2k0"
returned: when I(state=present)
connections:
type: list
description: A list of connections bundled by this LAG.
sample:
"connections": [
{
"aws_device": "EqSe2-1bwfvazist2k0",
"bandwidth": "1Gbps",
"connection_id": "dxcon-fgzjah5a",
"connection_name": "Requested Connection 1 for Lag dxlag-fgtoh97h",
"connection_state": "down",
"lag_id": "dxlag-fgnsp4rq",
"location": "EqSe2",
"owner_account": "448830907657",
"region": "us-west-2"
}
]
returned: when I(state=present)
connections_bandwidth:
type: str
description: The individual bandwidth of the physical connections bundled by the LAG.
sample: "1Gbps"
returned: when I(state=present)
lag_id:
type: str
description: Unique identifier for the link aggregation group.
sample: "dxlag-fgnsp4rq"
returned: when I(state=present)
lag_name:
type: str
description: User-provided name for the link aggregation group.
returned: when I(state=present)
lag_state:
type: str
description: State of the LAG.
sample: "pending"
returned: when I(state=present)
location:
type: str
description: Where the connection is located.
sample: "EqSe2"
returned: when I(state=present)
minimum_links:
type: int
description: The minimum number of physical connections that must be operational for the LAG itself to be operational.
returned: when I(state=present)
number_of_connections:
type: int
description: The number of physical connections bundled by the LAG.
returned: when I(state=present)
owner_account:
type: str
description: Owner account ID of the LAG.
returned: when I(state=present)
region:
type: str
description: The region in which the LAG exists.
returned: when I(state=present)
"""
from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, ec2_argument_spec, HAS_BOTO3,
get_aws_connection_info, boto3_conn, AWSRetry)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aws.direct_connect import (DirectConnectError,
delete_connection,
delete_virtual_interface,
disassociate_connection_and_lag)
import traceback
import time
try:
import botocore
except Exception:
pass
# handled by imported HAS_BOTO3
def lag_status(client, lag_id):
    """Return the LAG's current data dict, or an empty dict if not found."""
    # Delegate to lag_exists in non-verify mode, which returns the raw LAG
    # data instead of an ID/boolean.
    current_data = lag_exists(client, lag_id=lag_id, lag_name=None, verify=False)
    return current_data
def lag_exists(client, lag_id=None, lag_name=None, verify=True):
    """Look up a LAG by ID or (exact) name.

    If verify=True, returns the LAG ID when exactly one non-deleted match is
    found, otherwise False.
    If verify=False, returns the LAG's data dict when exactly one match is
    found by ID, otherwise an empty dict.
    """
    try:
        if lag_id:
            response = client.describe_lags(lagId=lag_id)
        else:
            response = client.describe_lags()
    except botocore.exceptions.ClientError as e:
        # A failed lookup of a specific LAG ID is treated as "not found"
        # rather than a hard error; only a general describe failure is fatal.
        if lag_id and verify:
            return False
        elif lag_id:
            return {}
        else:
            failed_op = "Failed to describe DirectConnect link aggregation groups."
            raise DirectConnectError(msg=failed_op,
                                     last_traceback=traceback.format_exc(),
                                     exception=e)

    match = []  # List of LAG IDs that are exact matches
    lag = []  # List of LAG data that are exact matches

    # look for matching connections
    if len(response.get('lags', [])) == 1 and lag_id:
        # Exactly one LAG came back for the requested ID; skip name matching.
        if response['lags'][0]['lagState'] != 'deleted':
            match.append(response['lags'][0]['lagId'])
            lag.append(response['lags'][0])
    else:
        for each in response.get('lags', []):
            if each['lagState'] != 'deleted':
                if not lag_id:
                    # Name search: only exact lagName matches count.
                    if lag_name == each['lagName']:
                        match.append(each['lagId'])
                else:
                    match.append(each['lagId'])

    # verifying if the connections exists; if true, return connection identifier, otherwise return False
    if verify and len(match) == 1:
        return match[0]
    elif verify:
        return False
    # not verifying if the connection exists; just return current connection info
    else:
        # NOTE: 'lag' is only populated on the single-ID path above, so a
        # name-only lookup with verify=False always yields {}.
        if len(lag) == 1:
            return lag[0]
        else:
            return {}
def create_lag(client, num_connections, location, bandwidth, name, connection_id):
    """Create a new link aggregation group and return its LAG ID.

    Raises DirectConnectError when no name is supplied or when the AWS
    call fails.
    """
    if not name:
        raise DirectConnectError(msg="Failed to create a Direct Connect link aggregation group: name required.",
                                 last_traceback=None,
                                 exception="")

    # Assemble the request, attaching an existing connection only if given.
    parameters = {
        'numberOfConnections': num_connections,
        'location': location,
        'connectionsBandwidth': bandwidth,
        'lagName': name,
    }
    if connection_id:
        parameters['connectionId'] = connection_id

    try:
        lag = client.create_lag(**parameters)
    except botocore.exceptions.ClientError as e:
        raise DirectConnectError(msg="Failed to create DirectConnect link aggregation group {0}".format(name),
                                 last_traceback=traceback.format_exc(),
                                 exception=e)

    return lag['lagId']
def delete_lag(client, lag_id):
    """Delete the LAG identified by lag_id.

    Wraps AWS client failures in DirectConnectError.
    """
    try:
        client.delete_lag(lagId=lag_id)
    except botocore.exceptions.ClientError as e:
        failure = "Failed to delete Direct Connect link aggregation group {0}.".format(lag_id)
        raise DirectConnectError(msg=failure,
                                 last_traceback=traceback.format_exc(),
                                 exception=e)
@AWSRetry.backoff(tries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException'])
def _update_lag(client, lag_id, lag_name, min_links):
    """Issue a single update_lag call, sending only the fields to change.

    A min_links of None means "leave unchanged".  Zero is a legitimate value
    and IS sent: ensure_absent relies on resetting minimumLinks to 0 before
    it can delete the LAG.
    """
    params = {}
    # Explicit None check: the previous truthiness test silently dropped 0,
    # so the "reset minimum links to 0" step never reached AWS.
    if min_links is not None:
        params.update(minimumLinks=min_links)
    if lag_name:
        params.update(lagName=lag_name)
    client.update_lag(lagId=lag_id, **params)
def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout):
    """Update a LAG's name and/or minimum-links setting, optionally retrying
    until wait_timeout while the LAG's connections are still provisioning.

    Raises DirectConnectError when min_links exceeds num_connections or the
    update ultimately fails.
    """
    start = time.time()

    if min_links and min_links > num_connections:
        raise DirectConnectError(
            msg="The number of connections {0} must be greater than the minimum number of links "
                "{1} to update the LAG {2}".format(num_connections, min_links, lag_id),
            last_traceback=None,
            exception=None
        )

    while True:
        try:
            _update_lag(client, lag_id, lag_name, min_links)
        except botocore.exceptions.ClientError as e:
            if wait and time.time() - start <= wait_timeout:
                # Back off briefly instead of busy-looping against the API
                # until the timeout expires.
                time.sleep(2)
                continue
            msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id)
            if "MinimumLinks cannot be set higher than the number of connections" in e.response['Error']['Message']:
                # Leading space added: the two sentences previously ran together.
                msg += " Unable to set the min number of links to {0} while the LAG connections are being requested".format(min_links)
            raise DirectConnectError(msg=msg,
                                     last_traceback=traceback.format_exc(),
                                     exception=e)
        else:
            break
def lag_changed(current_status, name, min_links):
    """ Determines if a modifiable link aggregation group attribute has been modified. """
    # Each check is skipped (falsy) when the desired value was not supplied.
    name_differs = name and name != current_status['lagName']
    min_links_differ = min_links and min_links != current_status['minimumLinks']
    return name_differs or min_links_differ
def ensure_present(client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout):
    """Ensure a LAG exists with the requested name/min_links.

    Returns a (changed, lag_id) tuple.  Raises DirectConnectError when an
    explicit lag_id was supplied but no such LAG exists.
    """
    exists = lag_exists(client, lag_id, lag_name)
    if not exists and lag_id:
        raise DirectConnectError(msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id),
                                 last_traceback=None,
                                 exception="")

    # the connection is found; get the latest state and see if it needs to be updated
    if exists:
        lag_id = exists
        latest_state = lag_status(client, lag_id)
        if lag_changed(latest_state, lag_name, min_links):
            update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout)
            return True, lag_id
        return False, lag_id

    # no connection found; create a new one
    else:
        lag_id = create_lag(client, num_connections, location, bandwidth, lag_name, connection_id)
        # Apply min_links after creation, since create_lag cannot set it.
        update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout)
        return True, lag_id
def describe_virtual_interfaces(client, lag_id):
    """Return the virtual interfaces attached to the LAG (possibly empty).

    Raises DirectConnectError when the describe call fails.
    """
    # The Direct Connect API accepts a LAG ID in the generic connectionId filter.
    failure = "Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id)
    try:
        response = client.describe_virtual_interfaces(connectionId=lag_id)
    except botocore.exceptions.ClientError as e:
        raise DirectConnectError(msg=failure,
                                 last_traceback=traceback.format_exc(),
                                 exception=e)
    return response.get('virtualInterfaces', [])
def get_connections_and_virtual_interfaces(client, lag_id):
    """Return a (virtual_interfaces, connections) pair for the given LAG."""
    attached_vis = describe_virtual_interfaces(client, lag_id)
    attached_connections = lag_status(client, lag_id=lag_id).get('connections', [])
    return attached_vis, attached_connections
def disassociate_vis(client, lag_id, virtual_interfaces):
    """Delete every virtual interface attached to the LAG.

    Errors from individual deletions propagate as DirectConnectError from
    the delete_virtual_interface helper.
    """
    for vi in virtual_interfaces:
        # delete_virtual_interface already performs the deletion and wraps
        # client errors.  The previous implementation issued a second raw
        # client.delete_virtual_interface call for the same ID immediately
        # afterwards, which was redundant and could only fail once the
        # interface had entered the 'deleting' state.
        delete_virtual_interface(client, vi['virtualInterfaceId'])
def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassociation, wait, wait_timeout):
    """Ensure the LAG (found by ID or name) is deleted.

    Returns False when no such LAG exists, True otherwise.  Raises
    DirectConnectError when the LAG still has a minimum-links setting,
    hosted connections, or virtual interfaces and force_delete is not set.
    """
    lag_id = lag_exists(client, lag_id, lag_name)
    if not lag_id:
        return False

    latest_status = lag_status(client, lag_id)

    # determine the associated connections and virtual interfaces to disassociate
    virtual_interfaces, connections = get_connections_and_virtual_interfaces(client, lag_id)

    # If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete
    if any((latest_status['minimumLinks'], virtual_interfaces, connections)) and not force_delete:
        raise DirectConnectError(msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. "
                                     "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). "
                                     "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True "
                                     "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id),
                                 last_traceback=None,
                                 exception=None)

    # update min_links to be 0 so we can remove the LAG
    # NOTE(review): _update_lag skips falsy values, so minimumLinks=0 may
    # never actually be sent to AWS here — confirm intended behavior.
    update_lag(client, lag_id, None, 0, len(connections), wait, wait_timeout)

    # if virtual_interfaces and not delete_vi_with_disassociation: Raise failure; can't delete while vi attached

    # Detach (and optionally delete) each hosted connection first.
    for connection in connections:
        disassociate_connection_and_lag(client, connection['connectionId'], lag_id)
        if delete_with_disassociation:
            delete_connection(client, connection['connectionId'])

    for vi in virtual_interfaces:
        delete_virtual_interface(client, vi['virtualInterfaceId'])

    start_time = time.time()
    while True:
        try:
            delete_lag(client, lag_id)
        except DirectConnectError as e:
            # Retry only while virtual interfaces are still being deleted and
            # the caller asked to wait within the timeout; any other failure
            # returns True as if the delete succeeded.
            # NOTE(review): the membership test assumes e.exception is
            # string-like, and a successful delete_lag loops back around so
            # that a second (failing) delete is what ultimately returns True —
            # confirm both behaviors are intended.
            if ('until its Virtual Interfaces are deleted' in e.exception) and (time.time() - start_time < wait_timeout) and wait:
                continue
            else:
                return True
def main():
    """Entry point for the Direct Connect LAG module.

    Builds the argument spec, creates a 'directconnect' boto3 client, and
    dispatches to ensure_present/ensure_absent.  DirectConnectError raised
    by the helpers is converted to module.fail_json.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(),
        link_aggregation_group_id=dict(),
        num_connections=dict(type='int'),
        min_links=dict(type='int'),
        location=dict(),
        bandwidth=dict(),
        connection_id=dict(),
        delete_with_disassociation=dict(type='bool', default=False),
        force_delete=dict(type='bool', default=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=120),
    ))

    # A LAG must be identified by ID or name; creation needs location+bandwidth.
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[('link_aggregation_group_id', 'name')],
                           required_if=[('state', 'present', ('location', 'bandwidth'))])

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")

    connection = boto3_conn(module, conn_type='client',
                            resource='directconnect', region=region,
                            endpoint=ec2_url, **aws_connect_kwargs)

    state = module.params.get('state')
    response = {}
    try:
        if state == 'present':
            changed, lag_id = ensure_present(connection,
                                             num_connections=module.params.get("num_connections"),
                                             lag_id=module.params.get("link_aggregation_group_id"),
                                             lag_name=module.params.get("name"),
                                             location=module.params.get("location"),
                                             bandwidth=module.params.get("bandwidth"),
                                             connection_id=module.params.get("connection_id"),
                                             min_links=module.params.get("min_links"),
                                             wait=module.params.get("wait"),
                                             wait_timeout=module.params.get("wait_timeout"))
            # Report the LAG's post-change state to the user.
            response = lag_status(connection, lag_id)
        elif state == "absent":
            changed = ensure_absent(connection,
                                    lag_id=module.params.get("link_aggregation_group_id"),
                                    lag_name=module.params.get("name"),
                                    force_delete=module.params.get("force_delete"),
                                    delete_with_disassociation=module.params.get("delete_with_disassociation"),
                                    wait=module.params.get('wait'),
                                    wait_timeout=module.params.get('wait_timeout'))
    except DirectConnectError as e:
        if e.last_traceback:
            module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception))
        else:
            module.fail_json(msg=e.msg)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))


if __name__ == '__main__':
    main()

@ -1,500 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_direct_connect_virtual_interface
short_description: Manage Direct Connect virtual interfaces
description:
- Create, delete, or modify a Direct Connect public or private virtual interface.
version_added: "2.5"
author: "Sloane Hertel (@s-hertel)"
requirements:
- boto3
- botocore
options:
state:
description:
- The desired state of the Direct Connect virtual interface.
choices: [present, absent]
type: str
required: true
id_to_associate:
description:
- The ID of the link aggregation group or connection to associate with the virtual interface.
aliases: [link_aggregation_group_id, connection_id]
type: str
required: true
public:
description:
- The type of virtual interface.
type: bool
name:
description:
- The name of the virtual interface.
type: str
vlan:
description:
- The VLAN ID.
default: 100
type: int
bgp_asn:
description:
- The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
default: 65000
type: int
authentication_key:
description:
- The authentication key for BGP configuration.
type: str
amazon_address:
description:
- The amazon address CIDR with which to create the virtual interface.
type: str
customer_address:
description:
- The customer address CIDR with which to create the virtual interface.
type: str
address_type:
description:
- The type of IP address for the BGP peer.
type: str
cidr:
description:
- A list of route filter prefix CIDRs with which to create the public virtual interface.
type: list
elements: str
virtual_gateway_id:
description:
- The virtual gateway ID required for creating a private virtual interface.
type: str
virtual_interface_id:
description:
- The virtual interface ID.
type: str
extends_documentation_fragment:
- aws
- ec2
'''
RETURN = '''
address_family:
description: The address family for the BGP peer.
returned: always
type: str
sample: ipv4
amazon_address:
description: IP address assigned to the Amazon interface.
returned: always
type: str
sample: 169.254.255.1/30
asn:
description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
returned: always
type: int
sample: 65000
auth_key:
description: The authentication key for BGP configuration.
returned: always
type: str
sample: 0xZ59Y1JZ2oDOSh6YriIlyRE
bgp_peers:
description: A list of the BGP peers configured on this virtual interface.
returned: always
type: complex
contains:
address_family:
description: The address family for the BGP peer.
returned: always
type: str
sample: ipv4
amazon_address:
description: IP address assigned to the Amazon interface.
returned: always
type: str
sample: 169.254.255.1/30
asn:
description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
returned: always
type: int
sample: 65000
auth_key:
description: The authentication key for BGP configuration.
returned: always
type: str
sample: 0xZ59Y1JZ2oDOSh6YriIlyRE
bgp_peer_state:
description: The state of the BGP peer (verifying, pending, available)
returned: always
type: str
sample: available
bgp_status:
description: The up/down state of the BGP peer.
returned: always
type: str
sample: up
customer_address:
description: IP address assigned to the customer interface.
returned: always
type: str
sample: 169.254.255.2/30
changed:
description: Indicates whether the virtual interface has been created/modified/deleted.
returned: always
type: bool
sample: false
connection_id:
description:
- The ID of the connection. This field is also used as the ID type for operations that
use multiple connection types (LAG, interconnect, and/or connection).
returned: always
type: str
sample: dxcon-fgb175av
customer_address:
description: IP address assigned to the customer interface.
returned: always
type: str
sample: 169.254.255.2/30
customer_router_config:
description: Information for generating the customer router configuration.
returned: always
type: str
location:
description: Where the connection is located.
returned: always
type: str
sample: EqDC2
owner_account:
description: The AWS account that will own the new virtual interface.
returned: always
type: str
sample: '123456789012'
route_filter_prefixes:
description: A list of routes to be advertised to the AWS network in this region (public virtual interface).
returned: always
type: complex
contains:
cidr:
description: A route to be advertised to the AWS network in this region.
returned: always
type: str
sample: 54.227.92.216/30
virtual_gateway_id:
description: The ID of the virtual private gateway to a VPC. This only applies to private virtual interfaces.
returned: when I(public=False)
type: str
sample: vgw-f3ce259a
virtual_interface_id:
description: The ID of the virtual interface.
returned: always
type: str
sample: dxvif-fh0w7cex
virtual_interface_name:
description: The name of the virtual interface assigned by the customer.
returned: always
type: str
sample: test_virtual_interface
virtual_interface_state:
description: State of the virtual interface (confirming, verifying, pending, available, down, rejected).
returned: always
type: str
sample: available
virtual_interface_type:
description: The type of virtual interface (private, public).
returned: always
type: str
sample: private
vlan:
description: The VLAN ID.
returned: always
type: int
sample: 100
'''
EXAMPLES = '''
---
- name: create an association between a LAG and connection
aws_direct_connect_virtual_interface:
state: present
name: "{{ name }}"
link_aggregation_group_id: LAG-XXXXXXXX
connection_id: dxcon-XXXXXXXX
- name: remove an association between a connection and virtual interface
aws_direct_connect_virtual_interface:
state: absent
connection_id: dxcon-XXXXXXXX
virtual_interface_id: dxv-XXXXXXXX
'''
import traceback
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.direct_connect import DirectConnectError, delete_virtual_interface
from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
# handled by AnsibleAWSModule
pass
def try_except_ClientError(failure_msg):
    '''
    Wrapper for boto3 calls that uses AWSRetry and handles exceptions.

    Returns a decorator that retries the wrapped callable with backoff
    (additionally retrying DirectConnectClientException) and converts any
    ClientError/BotoCoreError into DirectConnectError with failure_msg.
    '''
    def wrapper(f):
        def run_func(*args, **kwargs):
            try:
                # Retry with exponential backoff before giving up.
                result = AWSRetry.backoff(tries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs)
            except (ClientError, BotoCoreError) as e:
                raise DirectConnectError(failure_msg, traceback.format_exc(), e)
            return result
        return run_func
    return wrapper
def find_unique_vi(client, connection_id, virtual_interface_id, name):
    '''
    Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found.
    If multiple matches are found False is returned. If no matches are found None is returned.
    '''
    # Get the virtual interfaces, filtering by the ID if provided.
    vi_params = {}
    if virtual_interface_id:
        vi_params = {'virtualInterfaceId': virtual_interface_id}

    virtual_interfaces = try_except_ClientError(
        failure_msg="Failed to describe virtual interface")(
            client.describe_virtual_interfaces)(**vi_params).get('virtualInterfaces')

    # Remove deleting/deleted matches from the results.
    virtual_interfaces = [vi for vi in virtual_interfaces if vi['virtualInterfaceState'] not in ('deleting', 'deleted')]

    # Narrow down by name and/or connection ID, then map the result onto the
    # ID / None / False contract described above.
    matching_virtual_interfaces = filter_virtual_interfaces(virtual_interfaces, name, connection_id)
    return exact_match(matching_virtual_interfaces)
def exact_match(virtual_interfaces):
    '''
    Map a list of candidate virtual interfaces onto the lookup contract:
    None when the list is empty (nothing exists yet), the single entry's
    virtualInterfaceId when exactly one matched, and False when the match
    is ambiguous.
    '''
    if not virtual_interfaces:
        return None
    if len(virtual_interfaces) == 1:
        return virtual_interfaces[0]['virtualInterfaceId']
    return False
def filter_virtual_interfaces(virtual_interfaces, name, connection_id):
    '''
    Filters the available virtual interfaces to try to find a unique match.

    Returns the narrowed-down list (which may still be empty or ambiguous);
    exact_match() interprets the result.
    '''
    # Filter by name if provided.
    if name:
        matching_by_name = find_virtual_interface_by_name(virtual_interfaces, name)
        if len(matching_by_name) == 1:
            return matching_by_name
        # NOTE: when the name filter is not uniquely matching, the filtered
        # (possibly empty) list still falls through to the connection filter.
    else:
        matching_by_name = virtual_interfaces

    # If there isn't a unique match filter by connection ID as last resort (because connection_id may be a connection yet to be associated)
    if connection_id and len(matching_by_name) > 1:
        matching_by_connection_id = find_virtual_interface_by_connection_id(matching_by_name, connection_id)
        if len(matching_by_connection_id) == 1:
            return matching_by_connection_id
    else:
        # No further narrowing possible; return whatever the name stage left.
        matching_by_connection_id = matching_by_name

    return matching_by_connection_id
def find_virtual_interface_by_connection_id(virtual_interfaces, connection_id):
    '''
    Return virtual interfaces that have the connection_id associated
    '''
    matches = []
    for interface in virtual_interfaces:
        if interface['connectionId'] == connection_id:
            matches.append(interface)
    return matches
def find_virtual_interface_by_name(virtual_interfaces, name):
    '''
    Return virtual interfaces that match the provided name
    '''
    matches = []
    for interface in virtual_interfaces:
        if interface['virtualInterfaceName'] == name:
            matches.append(interface)
    return matches
def vi_state(client, virtual_interface_id):
    '''
    Returns the current description dict of the virtual interface.
    '''
    failure = "Failed to describe virtual interface: {0}".format(virtual_interface_id)
    describe = try_except_ClientError(failure_msg=failure)(client.describe_virtual_interfaces)
    response = describe(virtualInterfaceId=virtual_interface_id)
    return response['virtualInterfaces'][0]
def assemble_params_for_creating_vi(params):
    '''
    Returns kwargs to use in the call to create the virtual interface

    Params for public virtual interfaces:
    virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr
    Params for private virtual interfaces:
    virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId
    '''
    public = params['public']

    # Required parameters for every virtual interface type.
    assembled = dict(
        virtualInterfaceName=params['name'],
        vlan=params['vlan'],
        asn=params['bgp_asn'],
    )

    # Optional parameters are only included when the user supplied a value.
    optional = dict(
        authKey=params['authentication_key'],
        amazonAddress=params['amazon_address'],
        customerAddress=params['customer_address'],
        addressFamily=params['address_type'],
    )
    for key, value in optional.items():
        if value:
            assembled[key] = value

    # virtual interface type specific parameters
    if public and params['cidr']:
        assembled['routeFilterPrefixes'] = [{'cidr': c} for c in params['cidr']]
    if not public:
        assembled['virtualGatewayId'] = params['virtual_gateway_id']

    return assembled
def create_vi(client, public, associated_id, creation_params):
    '''
    :param public: a boolean
    :param associated_id: a link aggregation group ID or connection ID to associate
                          with the virtual interface.
    :param creation_params: a dict of parameters to use in the boto call
    :return The ID of the created virtual interface
    '''
    err_msg = "Failed to create virtual interface"
    # Public and private VIs are created via different API operations.
    if public:
        create = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)
        vi = create(connectionId=associated_id, newPublicVirtualInterface=creation_params)
    else:
        create = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)
        vi = create(connectionId=associated_id, newPrivateVirtualInterface=creation_params)
    return vi['virtualInterfaceId']
def modify_vi(client, virtual_interface_id, connection_id):
    '''
    Associate a new connection (or LAG) ID with the virtual interface.
    '''
    err_msg = "Unable to associate {0} with virtual interface {1}".format(connection_id, virtual_interface_id)
    associate = try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)
    associate(virtualInterfaceId=virtual_interface_id, connectionId=connection_id)
def needs_modification(client, virtual_interface_id, connection_id):
    '''
    Determine if the associated connection ID needs to be updated
    '''
    current = vi_state(client, virtual_interface_id)
    return current.get('connectionId') != connection_id
def ensure_state(connection, module):
    """Create, re-associate, or delete the virtual interface per module state.

    Returns (changed, latest_state) where latest_state is the VI's
    description dict, or {} after deletion/no-op absence.
    """
    changed = False
    state = module.params['state']
    connection_id = module.params['id_to_associate']
    public = module.params['public']
    name = module.params['name']

    virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get('virtual_interface_id'), name)

    # find_unique_vi returns False when multiple candidates matched.
    if virtual_interface_id is False:
        module.fail_json(msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, "
                             "and connection_id options if applicable to find a unique match.")

    if state == 'present':
        if not virtual_interface_id and module.params['virtual_interface_id']:
            # An explicit ID was requested but no such interface exists.
            module.fail_json(msg="The virtual interface {0} does not exist.".format(module.params['virtual_interface_id']))

        elif not virtual_interface_id:
            assembled_params = assemble_params_for_creating_vi(module.params)
            virtual_interface_id = create_vi(connection, public, connection_id, assembled_params)
            changed = True

        # Re-associate when the VI is not attached to id_to_associate yet.
        if needs_modification(connection, virtual_interface_id, connection_id):
            modify_vi(connection, virtual_interface_id, connection_id)
            changed = True

        latest_state = vi_state(connection, virtual_interface_id)

    else:
        if virtual_interface_id:
            delete_virtual_interface(connection, virtual_interface_id)
            changed = True

        latest_state = {}

    return changed, latest_state
def main():
    """Entry point for the Direct Connect virtual interface module.

    Builds the argument spec, validates type-specific requirements
    (public VIs need addresses/CIDRs, private VIs need a virtual gateway),
    and applies the requested state via ensure_state.
    """
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent']),
        id_to_associate=dict(required=True, aliases=['link_aggregation_group_id', 'connection_id']),
        public=dict(type='bool'),
        name=dict(),
        vlan=dict(type='int', default=100),
        bgp_asn=dict(type='int', default=65000),
        authentication_key=dict(),
        amazon_address=dict(),
        customer_address=dict(),
        address_type=dict(),
        cidr=dict(type='list'),
        virtual_gateway_id=dict(),
        virtual_interface_id=dict()
    )

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_one_of=[['virtual_interface_id', 'name']],
                              required_if=[['state', 'present', ['public']],
                                           ['public', False, ['virtual_gateway_id']],
                                           ['public', True, ['amazon_address']],
                                           ['public', True, ['customer_address']],
                                           ['public', True, ['cidr']]])

    connection = module.client('directconnect')

    try:
        changed, latest_state = ensure_state(connection, module)
    except DirectConnectError as e:
        # Prefer the AWS-aware failure path when a botocore exception is attached.
        if e.exception:
            module.fail_json_aws(exception=e.exception, msg=e.msg)
        else:
            module.fail_json(msg=e.msg)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(latest_state))


if __name__ == '__main__':
    main()

@ -1,307 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_eks_cluster
short_description: Manage Elastic Kubernetes Service Clusters
description:
- Manage Elastic Kubernetes Service Clusters
version_added: "2.7"
author: Will Thames (@willthames)
options:
name:
description: Name of EKS cluster
required: True
type: str
version:
description: Kubernetes version - defaults to latest
type: str
role_arn:
description: ARN of IAM role used by the EKS cluster
type: str
subnets:
description: list of subnet IDs for the Kubernetes cluster
type: list
elements: str
security_groups:
description: list of security group names or IDs
type: list
elements: str
state:
description: desired state of the EKS cluster
choices:
- absent
- present
default: present
type: str
wait:
description: >-
Specifies whether the module waits until the cluster is active or deleted
before moving on. It takes "usually less than 10 minutes" per AWS documentation.
type: bool
default: false
wait_timeout:
description: >-
The duration in seconds to wait for the cluster to become active. Defaults
to 1200 seconds (20 minutes).
default: 1200
type: int
requirements: [ 'botocore', 'boto3' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create an EKS cluster
aws_eks_cluster:
name: my_cluster
version: 1.14
role_arn: my_eks_role
subnets:
- subnet-aaaa1111
security_groups:
- my_eks_sg
- sg-abcd1234
register: caller_facts
- name: Remove an EKS cluster
aws_eks_cluster:
name: my_cluster
wait: yes
state: absent
'''
RETURN = '''
arn:
description: ARN of the EKS cluster
returned: when state is present
type: str
sample: arn:aws:eks:us-west-2:111111111111:cluster/my-eks-cluster
certificate_authority:
description: Dictionary containing Certificate Authority Data for cluster
returned: after creation
type: complex
contains:
data:
description: Base-64 encoded Certificate Authority Data for cluster
returned: when the cluster has been created and is active
type: str
endpoint:
description: Kubernetes API server endpoint
returned: when the cluster has been created and is active
type: str
sample: https://API_SERVER_ENDPOINT.yl4.us-west-2.eks.amazonaws.com
created_at:
description: Cluster creation date and time
returned: when state is present
type: str
sample: '2018-06-06T11:56:56.242000+00:00'
name:
description: EKS cluster name
returned: when state is present
type: str
sample: my-eks-cluster
resources_vpc_config:
description: VPC configuration of the cluster
returned: when state is present
type: complex
contains:
security_group_ids:
description: List of security group IDs
returned: always
type: list
sample:
- sg-abcd1234
- sg-aaaa1111
subnet_ids:
description: List of subnet IDs
returned: always
type: list
sample:
- subnet-abcdef12
- subnet-345678ab
- subnet-cdef1234
vpc_id:
description: VPC id
returned: always
type: str
sample: vpc-a1b2c3d4
role_arn:
description: ARN of the IAM role used by the cluster
returned: when state is present
type: str
sample: arn:aws:iam::111111111111:role/aws_eks_cluster_role
status:
description: status of the EKS cluster
returned: when state is present
type: str
sample:
- CREATING
- ACTIVE
version:
description: Kubernetes version of the cluster
returned: when state is present
type: str
sample: '1.10'
'''
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names
from ansible.module_utils.aws.waiters import get_waiter
try:
import botocore.exceptions
except ImportError:
pass # caught by AnsibleAWSModule
def ensure_present(client, module):
    """Create the EKS cluster if it does not exist, or validate that an
    existing cluster matches the requested configuration.

    Exits the module via exit_json()/fail_json(); does not return normally.

    :param client: boto3 EKS client
    :param module: AnsibleAWSModule instance
    """
    name = module.params.get('name')
    subnets = module.params['subnets']
    groups = module.params['security_groups']
    wait = module.params.get('wait')
    cluster = get_cluster(client, module)
    try:
        # Security groups may be given by name; resolve them to IDs within
        # the VPC of the first requested subnet.
        ec2 = module.client('ec2')
        vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])['Subnets'][0]['VpcId']
        groups = get_ec2_security_group_ids_from_names(groups, ec2, vpc_id)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Couldn't lookup security groups")
    if cluster:
        # The module cannot modify these attributes of an existing cluster,
        # so any mismatch is reported as a hard failure.
        if set(cluster['resourcesVpcConfig']['subnetIds']) != set(subnets):
            module.fail_json(msg="Cannot modify subnets of existing cluster")
        if set(cluster['resourcesVpcConfig']['securityGroupIds']) != set(groups):
            module.fail_json(msg="Cannot modify security groups of existing cluster")
        if module.params.get('version') and module.params.get('version') != cluster['version']:
            module.fail_json(msg="Cannot modify version of existing cluster")
        if wait:
            wait_until(client, module, 'cluster_active')
            # Ensure that fields that are only available for active clusters are
            # included in the returned value
            cluster = get_cluster(client, module)
        module.exit_json(changed=False, **camel_dict_to_snake_dict(cluster))
    if module.check_mode:
        module.exit_json(changed=True)
    try:
        # clientRequestToken makes the create call idempotent per cluster name
        params = dict(name=name,
                      roleArn=module.params['role_arn'],
                      resourcesVpcConfig=dict(
                          subnetIds=subnets,
                          securityGroupIds=groups),
                      clientRequestToken='ansible-create-%s' % name)
        if module.params['version']:
            params['version'] = module.params['version']
        cluster = client.create_cluster(**params)['cluster']
    except botocore.exceptions.EndpointConnectionError as e:
        module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Couldn't create cluster %s" % name)
    if wait:
        wait_until(client, module, 'cluster_active')
        # Ensure that fields that are only available for active clusters are
        # included in the returned value
        cluster = get_cluster(client, module)
    module.exit_json(changed=True, **camel_dict_to_snake_dict(cluster))
def ensure_absent(client, module):
    """Delete the EKS cluster named in the module parameters, if it exists.

    Exits the module via exit_json()/fail_json(); does not return normally.
    """
    cluster_name = module.params.get('name')
    wait_for_deletion = module.params.get('wait')
    if not get_cluster(client, module):
        # Nothing to remove.
        module.exit_json(changed=False)
    if not module.check_mode:
        try:
            client.delete_cluster(name=module.params['name'])
        except botocore.exceptions.EndpointConnectionError as e:
            module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            module.fail_json_aws(e, msg="Couldn't delete cluster %s" % cluster_name)
    if wait_for_deletion:
        wait_until(client, module, 'cluster_deleted')
    module.exit_json(changed=True)
def get_cluster(client, module):
    """Return the EKS cluster description dict, or None if it does not exist.

    Fails the module on any other API error.
    """
    name = module.params.get('name')
    try:
        return client.describe_cluster(name=name)['cluster']
    except is_boto3_error_code('ResourceNotFoundException'):
        # No cluster with this name exists
        return None
    except botocore.exceptions.EndpointConnectionError as e:  # pylint: disable=duplicate-except
        module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Couldn't get cluster %s" % name)
def wait_until(client, module, waiter_name='cluster_active'):
    """Block until the named waiter reports success for this cluster.

    The module's wait_timeout parameter is converted into a poll count based
    on the waiter's configured delay between attempts.
    """
    cluster_name = module.params.get('name')
    timeout_seconds = module.params.get('wait_timeout')
    waiter = get_waiter(client, waiter_name)
    max_attempts = 1 + int(timeout_seconds / waiter.config.delay)
    waiter.wait(name=cluster_name, WaiterConfig={'MaxAttempts': max_attempts})
def main():
    # Module entry point: declare arguments, check botocore support, then
    # dispatch to ensure_present()/ensure_absent().
    argument_spec = dict(
        name=dict(required=True),
        version=dict(),
        role_arn=dict(),
        subnets=dict(type='list'),
        security_groups=dict(type='list'),
        state=dict(choices=['absent', 'present'], default='present'),
        wait=dict(default=False, type='bool'),
        wait_timeout=dict(default=1200, type='int')
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        # role_arn, subnets and security_groups are only needed when creating
        required_if=[['state', 'present', ['role_arn', 'subnets', 'security_groups']]],
        supports_check_mode=True,
    )
    if not module.botocore_at_least("1.10.32"):
        module.fail_json(msg='aws_eks_cluster module requires botocore >= 1.10.32')
    # waiting on deletion uses the 'cluster_deleted' waiter, which presumably
    # needs the newer botocore release named in the message below
    if (not module.botocore_at_least("1.12.38") and
            module.params.get('state') == 'absent' and
            module.params.get('wait')):
        module.fail_json(msg='aws_eks_cluster: wait=yes when state=absent requires botocore >= 1.12.38')
    client = module.client('eks')
    if module.params.get('state') == 'present':
        ensure_present(client, module)
    else:
        ensure_absent(client, module)


if __name__ == '__main__':
    main()

@ -1,228 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_elasticbeanstalk_app
short_description: Create, update, and delete an elastic beanstalk application
version_added: "2.5"
description:
    - Creates, updates, and deletes Elastic Beanstalk applications if app_name is provided.
options:
app_name:
description:
- Name of the beanstalk application you wish to manage.
aliases: [ 'name' ]
type: str
description:
description:
- The description of the application.
type: str
state:
description:
- Whether to ensure the application is present or absent.
default: present
choices: ['absent','present']
type: str
terminate_by_force:
description:
- When I(terminate_by_force=true), running environments will be terminated before deleting the application.
default: false
type: bool
author:
- Harpreet Singh (@hsingh)
- Stephen Granger (@viper233)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create or update an application
- aws_elasticbeanstalk_app:
app_name: Sample_App
description: "Hello World App"
state: present
# Delete application
- aws_elasticbeanstalk_app:
app_name: Sample_App
state: absent
'''
RETURN = '''
app:
description: Beanstalk application.
returned: always
type: dict
sample: {
"ApplicationName": "app-name",
"ConfigurationTemplates": [],
"DateCreated": "2016-12-28T14:50:03.185000+00:00",
"DateUpdated": "2016-12-28T14:50:03.185000+00:00",
"Description": "description",
"Versions": [
"1.0.0",
"1.0.1"
]
}
output:
description: Message indicating what change will occur.
returned: in check mode
type: str
sample: App is up-to-date
'''
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
def describe_app(ebs, app_name, module):
    """Return the single Elastic Beanstalk application matching app_name,
    or None when zero or multiple applications match."""
    matches = list_apps(ebs, app_name, module)
    if len(matches) == 1:
        return matches[0]
    return None
def list_apps(ebs, app_name, module):
    """Return the list of Elastic Beanstalk applications, optionally
    filtered to a single name.

    Fails the module on API errors; returns [] when the response carries
    no "Applications" key.
    """
    try:
        if app_name is not None:
            apps = ebs.describe_applications(ApplicationNames=[app_name])
        else:
            apps = ebs.describe_applications()
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Could not describe application")
    return apps.get("Applications", [])
def check_app(ebs, app, module):
    """Report, via exit_json(), what the module would change (check mode).

    :param ebs: boto3 ElasticBeanstalk client (unused; kept for signature parity)
    :param app: current application description dict, or None if absent
    :param module: AnsibleAWSModule instance

    Always exits the module; never returns control to the caller.
    """
    app_name = module.params['app_name']
    description = module.params['description']
    state = module.params['state']
    terminate_by_force = module.params['terminate_by_force']
    result = {}
    if state == 'present' and app is None:
        result = dict(changed=True, output="App would be created")
    elif state == 'present' and app.get("Description", None) != description:
        result = dict(changed=True, output="App would be updated", app=app)
    elif state == 'present' and app.get("Description", None) == description:
        result = dict(changed=False, output="App is up-to-date", app=app)
    elif state == 'absent' and app is None:
        result = dict(changed=False, output="App does not exist", app={})
    # The terminate_by_force branch must be tested BEFORE the generic
    # "absent and app exists" branch: in the original ordering the generic
    # branch always matched first, making this message unreachable.
    elif state == 'absent' and app is not None and terminate_by_force is True:
        result = dict(changed=True, output="Running environments terminated before the App will be deleted", app=app)
    elif state == 'absent' and app is not None:
        result = dict(changed=True, output="App will be deleted", app=app)
    module.exit_json(**result)
def filter_empty(**kwargs):
    """Return a dict containing only the keyword arguments with truthy values."""
    return dict((key, value) for key, value in kwargs.items() if value)
def main():
    # Module entry point: create, update or delete an Elastic Beanstalk
    # application depending on `state` and whether the app already exists.
    argument_spec = dict(
        app_name=dict(aliases=['name'], type='str', required=False),
        description=dict(),
        state=dict(choices=['present', 'absent'], default='present'),
        terminate_by_force=dict(type='bool', default=False, required=False)
    )
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
    app_name = module.params['app_name']
    description = module.params['description']
    state = module.params['state']
    terminate_by_force = module.params['terminate_by_force']
    # app_name is declared optional in the spec but the module cannot
    # operate without it, so enforce it manually
    if app_name is None:
        module.fail_json(msg='Module parameter "app_name" is required')
    result = {}
    ebs = module.client('elasticbeanstalk')
    app = describe_app(ebs, app_name, module)
    if module.check_mode:
        # check_app() always calls exit_json(), so the line after it is a
        # sanity guard that should never execute
        check_app(ebs, app, module)
        module.fail_json(msg='ASSERTION FAILURE: check_app() should not return control.')
    if state == 'present':
        if app is None:
            # Create, omitting empty optional fields from the API call
            try:
                create_app = ebs.create_application(**filter_empty(ApplicationName=app_name,
                                                                   Description=description))
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Could not create application")
            app = describe_app(ebs, app_name, module)
            result = dict(changed=True, app=app)
        else:
            if app.get("Description", None) != description:
                # Description is omitted from the call when falsy rather than
                # being passed as an empty value
                try:
                    if not description:
                        ebs.update_application(ApplicationName=app_name)
                    else:
                        ebs.update_application(ApplicationName=app_name, Description=description)
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Could not update application")
                app = describe_app(ebs, app_name, module)
                result = dict(changed=True, app=app)
            else:
                result = dict(changed=False, app=app)
    else:
        if app is None:
            result = dict(changed=False, output='Application not found', app={})
        else:
            try:
                if terminate_by_force:
                    # Running environments will be terminated before deleting the application
                    ebs.delete_application(ApplicationName=app_name, TerminateEnvByForce=terminate_by_force)
                else:
                    ebs.delete_application(ApplicationName=app_name)
                changed = True
            except BotoCoreError as e:
                module.fail_json_aws(e, msg="Cannot terminate app")
            except ClientError as e:
                # A deletion already in progress is treated as "no change"
                # rather than a failure
                if 'It is currently pending deletion.' not in e.response['Error']['Message']:
                    module.fail_json_aws(e, msg="Cannot terminate app")
                else:
                    changed = False
            result = dict(changed=changed, app=app)
    module.exit_json(**result)


if __name__ == '__main__':
    main()

@ -1,337 +0,0 @@
#!/usr/bin/python
# Copyright: (c) 2018, Rob White (@wimnat)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_glue_connection
short_description: Manage an AWS Glue connection
description:
- Manage an AWS Glue connection. See U(https://aws.amazon.com/glue/) for details.
version_added: "2.6"
requirements: [ boto3 ]
author: "Rob White (@wimnat)"
options:
catalog_id:
description:
- The ID of the Data Catalog in which to create the connection. If none is supplied,
the AWS account ID is used by default.
type: str
connection_properties:
description:
- A dict of key-value pairs used as parameters for this connection.
- Required when I(state=present).
type: dict
connection_type:
description:
- The type of the connection. Currently, only JDBC is supported; SFTP is not supported.
default: JDBC
choices: [ 'JDBC', 'SFTP' ]
type: str
description:
description:
- The description of the connection.
type: str
match_criteria:
description:
- A list of UTF-8 strings that specify the criteria that you can use in selecting this connection.
type: list
elements: str
name:
description:
- The name of the connection.
required: true
type: str
security_groups:
description:
- A list of security groups to be used by the connection. Use either security group name or ID.
type: list
elements: str
state:
description:
- Create or delete the AWS Glue connection.
required: true
choices: [ 'present', 'absent' ]
type: str
subnet_id:
description:
- The subnet ID used by the connection.
type: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an AWS Glue connection
- aws_glue_connection:
name: my-glue-connection
connection_properties:
JDBC_CONNECTION_URL: jdbc:mysql://mydb:3306/databasename
USERNAME: my-username
PASSWORD: my-password
state: present
# Delete an AWS Glue connection
- aws_glue_connection:
name: my-glue-connection
state: absent
'''
RETURN = '''
connection_properties:
description: A dict of key-value pairs used as parameters for this connection.
returned: when state is present
type: dict
sample: {'JDBC_CONNECTION_URL':'jdbc:mysql://mydb:3306/databasename','USERNAME':'x','PASSWORD':'y'}
connection_type:
description: The type of the connection.
returned: when state is present
type: str
sample: JDBC
creation_time:
description: The time this connection definition was created.
returned: when state is present
type: str
sample: "2018-04-21T05:19:58.326000+00:00"
description:
description: Description of the job being defined.
returned: when state is present
type: str
sample: My first Glue job
last_updated_time:
description: The last time this connection definition was updated.
returned: when state is present
type: str
sample: "2018-04-21T05:19:58.326000+00:00"
match_criteria:
description: A list of criteria that can be used in selecting this connection.
returned: when state is present
type: list
sample: []
name:
description: The name of the connection definition.
returned: when state is present
type: str
sample: my-glue-connection
physical_connection_requirements:
description: A dict of physical connection requirements, such as VPC and SecurityGroup,
needed for making this connection successfully.
returned: when state is present
type: dict
sample: {'subnet-id':'subnet-aabbccddee'}
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names
# Non-ansible imports
import copy
import time
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass
def _get_glue_connection(connection, module):
"""
Get an AWS Glue connection based on name. If not found, return None.
:param connection: AWS boto3 glue connection
:param module: Ansible module
:return: boto3 Glue connection dict or None if not found
"""
connection_name = module.params.get("name")
connection_catalog_id = module.params.get("catalog_id")
params = {'Name': connection_name}
if connection_catalog_id is not None:
params['CatalogId'] = connection_catalog_id
try:
return connection.get_connection(**params)['Connection']
except (BotoCoreError, ClientError) as e:
if e.response['Error']['Code'] == 'EntityNotFoundException':
return None
else:
raise e
def _compare_glue_connection_params(user_params, current_params):
    """
    Compare Glue connection params. If there is a difference, return True immediately else return False

    Note: mutates current_params in place by filling in keys that boto3
    omits when their value is empty.

    :param user_params: the Glue connection parameters passed by the user
    :param current_params: the Glue connection parameters currently configured
    :return: True if any parameter is mismatched else False
    """
    # Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description
    # To counter this, add the key if it's missing with a blank value
    if 'Description' not in current_params:
        current_params['Description'] = ""
    if 'MatchCriteria' not in current_params:
        current_params['MatchCriteria'] = list()
    if 'PhysicalConnectionRequirements' not in current_params:
        current_params['PhysicalConnectionRequirements'] = dict()
        current_params['PhysicalConnectionRequirements']['SecurityGroupIdList'] = []
        current_params['PhysicalConnectionRequirements']['SubnetId'] = ""
    # Each check below only fires when the user actually supplied the field,
    # and short-circuits on the first detected difference.
    if 'ConnectionProperties' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionProperties'] \
            != current_params['ConnectionProperties']:
        return True
    if 'ConnectionType' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionType'] \
            != current_params['ConnectionType']:
        return True
    if 'Description' in user_params['ConnectionInput'] and user_params['ConnectionInput']['Description'] != current_params['Description']:
        return True
    # MatchCriteria is order-insensitive, hence the set comparison
    if 'MatchCriteria' in user_params['ConnectionInput'] and set(user_params['ConnectionInput']['MatchCriteria']) != set(current_params['MatchCriteria']):
        return True
    if 'PhysicalConnectionRequirements' in user_params['ConnectionInput']:
        # Security group order is not significant
        if 'SecurityGroupIdList' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \
                set(user_params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList']) \
                != set(current_params['PhysicalConnectionRequirements']['SecurityGroupIdList']):
            return True
        if 'SubnetId' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \
                user_params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] \
                != current_params['PhysicalConnectionRequirements']['SubnetId']:
            return True
    return False
def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection):
    """
    Create or update an AWS Glue connection

    :param connection: AWS boto3 glue connection
    :param connection_ec2: AWS boto3 ec2 client (used to resolve security group names)
    :param module: Ansible module
    :param glue_connection: a dict of AWS Glue connection parameters or None
    :return: never returns; exits the module via exit_json()/fail_json()
    """
    changed = False
    # Build the ConnectionInput structure expected by the Glue API from the
    # module parameters, including only the fields the user supplied.
    params = dict()
    params['ConnectionInput'] = dict()
    params['ConnectionInput']['Name'] = module.params.get("name")
    params['ConnectionInput']['ConnectionType'] = module.params.get("connection_type")
    params['ConnectionInput']['ConnectionProperties'] = module.params.get("connection_properties")
    if module.params.get("catalog_id") is not None:
        params['CatalogId'] = module.params.get("catalog_id")
    if module.params.get("description") is not None:
        params['ConnectionInput']['Description'] = module.params.get("description")
    if module.params.get("match_criteria") is not None:
        params['ConnectionInput']['MatchCriteria'] = module.params.get("match_criteria")
    if module.params.get("security_groups") is not None or module.params.get("subnet_id") is not None:
        params['ConnectionInput']['PhysicalConnectionRequirements'] = dict()
    if module.params.get("security_groups") is not None:
        # Get security group IDs from names
        security_group_ids = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection_ec2, boto3=True)
        params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList'] = security_group_ids
    if module.params.get("subnet_id") is not None:
        params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] = module.params.get("subnet_id")
    # If glue_connection is not None then check if it needs to be modified, else create it
    if glue_connection:
        if _compare_glue_connection_params(params, glue_connection):
            try:
                # We need to slightly modify the params for an update
                update_params = copy.deepcopy(params)
                update_params['Name'] = update_params['ConnectionInput']['Name']
                connection.update_connection(**update_params)
                changed = True
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e)
    else:
        try:
            connection.create_connection(**params)
            changed = True
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e)
    # If changed, get the Glue connection again
    if changed:
        # Poll until the new/updated connection is visible (eventual
        # consistency), up to roughly 10 x 10 seconds.
        # NOTE(review): if it is still not visible after the retries,
        # glue_connection stays None and camel_dict_to_snake_dict(None)
        # below would fail — confirm intended behavior.
        glue_connection = None
        for i in range(10):
            glue_connection = _get_glue_connection(connection, module)
            if glue_connection is not None:
                break
            time.sleep(10)
    module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection))
def delete_glue_connection(connection, module, glue_connection):
    """
    Delete an AWS Glue connection

    :param connection: AWS boto3 glue connection
    :param module: Ansible module
    :param glue_connection: a dict of AWS Glue connection parameters or None
    :return: never returns; exits the module via exit_json()/fail_json()
    """
    delete_params = {'ConnectionName': module.params.get("name")}
    catalog_id = module.params.get("catalog_id")
    if catalog_id is not None:
        delete_params['CatalogId'] = catalog_id
    changed = False
    if glue_connection:
        try:
            connection.delete_connection(**delete_params)
            changed = True
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e)
    module.exit_json(changed=changed)
def main():
    # Module entry point: look up any existing connection with this name,
    # then create/update or delete it depending on `state`.
    argument_spec = (
        dict(
            catalog_id=dict(type='str'),
            connection_properties=dict(type='dict'),
            connection_type=dict(type='str', default='JDBC', choices=['JDBC', 'SFTP']),
            description=dict(type='str'),
            match_criteria=dict(type='list'),
            name=dict(required=True, type='str'),
            security_groups=dict(type='list'),
            state=dict(required=True, choices=['present', 'absent'], type='str'),
            subnet_id=dict(type='str')
        )
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              # connection_properties only matters when creating/updating
                              required_if=[
                                  ('state', 'present', ['connection_properties'])
                              ]
                              )
    connection_glue = module.client('glue')
    connection_ec2 = module.client('ec2')
    # None when no connection with this name exists yet
    glue_connection = _get_glue_connection(connection_glue, module)
    if module.params.get("state") == 'present':
        create_or_update_glue_connection(connection_glue, connection_ec2, module, glue_connection)
    else:
        delete_glue_connection(connection_glue, module, glue_connection)


if __name__ == '__main__':
    main()

@ -1,373 +0,0 @@
#!/usr/bin/python
# Copyright: (c) 2018, Rob White (@wimnat)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_glue_job
short_description: Manage an AWS Glue job
description:
- Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details.
version_added: "2.6"
requirements: [ boto3 ]
author: "Rob White (@wimnat)"
options:
allocated_capacity:
description:
- The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs
can be allocated; the default is 10. A DPU is a relative measure of processing power that consists
of 4 vCPUs of compute capacity and 16 GB of memory.
type: int
command_name:
description:
- The name of the job command. This must be 'glueetl'.
default: glueetl
type: str
command_script_location:
description:
- The S3 path to a script that executes a job.
- Required when I(state=present).
type: str
connections:
description:
- A list of Glue connections used for this job.
type: list
elements: str
default_arguments:
description:
- A dict of default arguments for this job. You can specify arguments here that your own job-execution
script consumes, as well as arguments that AWS Glue itself consumes.
type: dict
description:
description:
- Description of the job being defined.
type: str
max_concurrent_runs:
description:
- The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when
this threshold is reached. The maximum value you can specify is controlled by a service limit.
type: int
max_retries:
description:
- The maximum number of times to retry this job if it fails.
type: int
name:
description:
- The name you assign to this job definition. It must be unique in your account.
required: true
type: str
role:
description:
- The name or ARN of the IAM role associated with this job.
- Required when I(state=present).
type: str
state:
description:
- Create or delete the AWS Glue job.
required: true
choices: [ 'present', 'absent' ]
type: str
timeout:
description:
- The job timeout in minutes.
type: int
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an AWS Glue job
- aws_glue_job:
command_script_location: s3bucket/script.py
name: my-glue-job
role: my-iam-role
state: present
# Delete an AWS Glue job
- aws_glue_job:
name: my-glue-job
state: absent
'''
RETURN = '''
allocated_capacity:
description: The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to
100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power
that consists of 4 vCPUs of compute capacity and 16 GB of memory.
returned: when state is present
type: int
sample: 10
command:
description: The JobCommand that executes this job.
returned: when state is present
type: complex
contains:
name:
description: The name of the job command.
returned: when state is present
type: str
sample: glueetl
script_location:
description: Specifies the S3 path to a script that executes a job.
returned: when state is present
type: str
sample: mybucket/myscript.py
connections:
description: The connections used for this job.
returned: when state is present
type: dict
sample: "{ Connections: [ 'list', 'of', 'connections' ] }"
created_on:
description: The time and date that this job definition was created.
returned: when state is present
type: str
sample: "2018-04-21T05:19:58.326000+00:00"
default_arguments:
description: The default arguments for this job, specified as name-value pairs.
returned: when state is present
type: dict
sample: "{ 'mykey1': 'myvalue1' }"
description:
description: Description of the job being defined.
returned: when state is present
type: str
sample: My first Glue job
job_name:
description: The name of the AWS Glue job.
returned: always
type: str
sample: my-glue-job
execution_property:
description: An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
returned: always
type: complex
contains:
max_concurrent_runs:
description: The maximum number of concurrent runs allowed for the job. The default is 1. An error is
returned when this threshold is reached. The maximum value you can specify is controlled by
a service limit.
returned: when state is present
type: int
sample: 1
last_modified_on:
description: The last point in time when this job definition was modified.
returned: when state is present
type: str
sample: "2018-04-21T05:19:58.326000+00:00"
max_retries:
description: The maximum number of times to retry this job after a JobRun fails.
returned: when state is present
type: int
sample: 5
name:
description: The name assigned to this job definition.
returned: when state is present
type: str
sample: my-glue-job
role:
description: The name or ARN of the IAM role associated with this job.
returned: when state is present
type: str
sample: my-iam-role
timeout:
description: The job timeout in minutes.
returned: when state is present
type: int
sample: 300
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
# Non-ansible imports
import copy
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass
def _get_glue_job(connection, module, glue_job_name):
"""
Get an AWS Glue job based on name. If not found, return None.
:param connection: AWS boto3 glue connection
:param module: Ansible module
:param glue_job_name: Name of Glue job to get
:return: boto3 Glue job dict or None if not found
"""
try:
return connection.get_job(JobName=glue_job_name)['Job']
except (BotoCoreError, ClientError) as e:
if e.response['Error']['Code'] == 'EntityNotFoundException':
return None
else:
module.fail_json_aws(e)
def _compare_glue_job_params(user_params, current_params):
"""
Compare Glue job params. If there is a difference, return True immediately else return False
:param user_params: the Glue job parameters passed by the user
:param current_params: the Glue job parameters currently configured
:return: True if any parameter is mismatched else False
"""
# Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description
# To counter this, add the key if it's missing with a blank value
if 'Description' not in current_params:
current_params['Description'] = ""
if 'DefaultArguments' not in current_params:
current_params['DefaultArguments'] = dict()
if 'AllocatedCapacity' in user_params and user_params['AllocatedCapacity'] != current_params['AllocatedCapacity']:
return True
if 'Command' in user_params and user_params['Command']['ScriptLocation'] != current_params['Command']['ScriptLocation']:
return True
if 'Connections' in user_params and set(user_params['Connections']) != set(current_params['Connections']):
return True
if 'DefaultArguments' in user_params and set(user_params['DefaultArguments']) != set(current_params['DefaultArguments']):
return True
if 'Description' in user_params and user_params['Description'] != current_params['Description']:
return True
if 'ExecutionProperty' in user_params and user_params['ExecutionProperty']['MaxConcurrentRuns'] != current_params['ExecutionProperty']['MaxConcurrentRuns']:
return True
if 'MaxRetries' in user_params and user_params['MaxRetries'] != current_params['MaxRetries']:
return True
if 'Timeout' in user_params and user_params['Timeout'] != current_params['Timeout']:
return True
return False
def create_or_update_glue_job(connection, module, glue_job):
    """
    Create or update an AWS Glue job

    :param connection: AWS boto3 glue connection
    :param module: Ansible module
    :param glue_job: a dict of AWS Glue job parameters or None
    :return: never returns; exits the module via exit_json()/fail_json()
    """
    changed = False
    # Build the job definition from the module parameters, including only
    # the optional fields the user supplied.
    params = dict()
    params['Name'] = module.params.get("name")
    params['Role'] = module.params.get("role")
    if module.params.get("allocated_capacity") is not None:
        params['AllocatedCapacity'] = module.params.get("allocated_capacity")
    if module.params.get("command_script_location") is not None:
        params['Command'] = {'Name': module.params.get("command_name"), 'ScriptLocation': module.params.get("command_script_location")}
    if module.params.get("connections") is not None:
        params['Connections'] = {'Connections': module.params.get("connections")}
    if module.params.get("default_arguments") is not None:
        params['DefaultArguments'] = module.params.get("default_arguments")
    if module.params.get("description") is not None:
        params['Description'] = module.params.get("description")
    if module.params.get("max_concurrent_runs") is not None:
        params['ExecutionProperty'] = {'MaxConcurrentRuns': module.params.get("max_concurrent_runs")}
    if module.params.get("max_retries") is not None:
        params['MaxRetries'] = module.params.get("max_retries")
    if module.params.get("timeout") is not None:
        params['Timeout'] = module.params.get("timeout")
    # If glue_job is not None then check if it needs to be modified, else create it
    if glue_job:
        if _compare_glue_job_params(params, glue_job):
            try:
                # Update job needs slightly modified params: the job name is
                # passed as JobName and must not appear inside JobUpdate
                update_params = {'JobName': params['Name'], 'JobUpdate': copy.deepcopy(params)}
                del update_params['JobUpdate']['Name']
                connection.update_job(**update_params)
                changed = True
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e)
    else:
        try:
            connection.create_job(**params)
            changed = True
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e)
    # If changed, get the Glue job again
    if changed:
        glue_job = _get_glue_job(connection, module, params['Name'])
    module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job))
def delete_glue_job(connection, module, glue_job):
    """
    Delete an AWS Glue job

    :param connection: AWS boto3 glue connection
    :param module: Ansible module
    :param glue_job: a dict of AWS Glue job parameters or None
    :return: never returns; exits the module via exit_json()/fail_json()
    """
    job_was_deleted = False
    if glue_job:
        try:
            connection.delete_job(JobName=glue_job['Name'])
            job_was_deleted = True
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e)
    module.exit_json(changed=job_was_deleted)
def main():
    """Entry point: create/update or delete an AWS Glue job according to 'state'."""
    argument_spec = dict(
        allocated_capacity=dict(type='int'),
        command_name=dict(type='str', default='glueetl'),
        command_script_location=dict(type='str'),
        connections=dict(type='list'),
        default_arguments=dict(type='dict'),
        description=dict(type='str'),
        max_concurrent_runs=dict(type='int'),
        max_retries=dict(type='int'),
        name=dict(required=True, type='str'),
        role=dict(type='str'),
        state=dict(required=True, choices=['present', 'absent'], type='str'),
        timeout=dict(type='int')
    )

    # role and a script location are only meaningful (and required) on create/update
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[
            ('state', 'present', ['role', 'command_script_location'])
        ],
    )

    connection = module.client('glue')

    # Look up any existing job first so both branches can act idempotently.
    glue_job = _get_glue_job(connection, module, module.params.get("name"))

    if module.params.get("state") == 'present':
        create_or_update_glue_job(connection, module, glue_job)
    else:
        delete_glue_job(connection, module, glue_job)


if __name__ == '__main__':
    main()

@ -1,248 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2018 Dennis Conrad for Sainsbury's
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_inspector_target
short_description: Create, Update and Delete Amazon Inspector Assessment
Targets
description: Creates, updates, or deletes Amazon Inspector Assessment Targets
and manages the required Resource Groups.
version_added: "2.6"
author: "Dennis Conrad (@dennisconrad)"
options:
name:
description:
- The user-defined name that identifies the assessment target. The name
must be unique within the AWS account.
required: true
type: str
state:
description:
- The state of the assessment target.
choices:
- absent
- present
default: present
type: str
tags:
description:
- Tags of the EC2 instances to be added to the assessment target.
- Required if C(state=present).
type: dict
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
- botocore
'''
EXAMPLES = '''
- name: Create my_target Assessment Target
aws_inspector_target:
name: my_target
tags:
role: scan_target
- name: Update Existing my_target Assessment Target with Additional Tags
aws_inspector_target:
name: my_target
tags:
env: dev
role: scan_target
- name: Delete my_target Assessment Target
aws_inspector_target:
name: my_target
state: absent
'''
RETURN = '''
arn:
description: The ARN that specifies the Amazon Inspector assessment target.
returned: success
type: str
sample: "arn:aws:inspector:eu-west-1:123456789012:target/0-O4LnL7n1"
created_at:
description: The time at which the assessment target was created.
returned: success
type: str
sample: "2018-01-29T13:48:51.958000+00:00"
name:
description: The name of the Amazon Inspector assessment target.
returned: success
type: str
sample: "my_target"
resource_group_arn:
description: The ARN that specifies the resource group that is associated
with the assessment target.
returned: success
type: str
sample: "arn:aws:inspector:eu-west-1:123456789012:resourcegroup/0-qY4gDel8"
tags:
description: The tags of the resource group that is associated with the
assessment target.
returned: success
  type: dict
sample: {"role": "scan_target", "env": "dev"}
updated_at:
description: The time at which the assessment target was last updated.
returned: success
type: str
sample: "2018-01-29T13:48:51.958000+00:00"
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import AWSRetry
from ansible.module_utils.ec2 import (
ansible_dict_to_boto3_tag_list,
boto3_tag_list_to_ansible_dict,
camel_dict_to_snake_dict,
compare_aws_tags,
)
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def main():
    """
    Create, update, or delete an Amazon Inspector assessment target.

    A resource group carrying the requested EC2 tag filters is created as
    needed and attached to the assessment target identified by ``name``.
    """
    # NOTE(review): the retry decorator wraps ALL of main(), so a throttled
    # API call re-runs the entire module logic rather than the single call —
    # confirm this is intentional.
    argument_spec = dict(
        name=dict(required=True),
        state=dict(choices=['absent', 'present'], default='present'),
        tags=dict(type='dict'),
    )

    required_if = [['state', 'present', ['tags']]]

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        required_if=required_if,
    )

    name = module.params.get('name')
    state = module.params.get('state').lower()
    tags = module.params.get('tags')

    if tags:
        # Convert {'k': 'v'} into boto3's [{'key': 'k', 'value': 'v'}] form.
        tags = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')

    client = module.client('inspector')

    try:
        existing_target_arn = client.list_assessment_targets(
            filter={'assessmentTargetNamePattern': name},
        ).get('assessmentTargetArns')[0]

        existing_target = camel_dict_to_snake_dict(
            client.describe_assessment_targets(
                assessmentTargetArns=[existing_target_arn],
            ).get('assessmentTargets')[0]
        )

        existing_resource_group_arn = existing_target.get('resource_group_arn')
        existing_resource_group_tags = client.describe_resource_groups(
            resourceGroupArns=[existing_resource_group_arn],
        ).get('resourceGroups')[0].get('tags')

        target_exists = True
    except (
        botocore.exceptions.BotoCoreError,
        botocore.exceptions.ClientError,
    ) as e:
        module.fail_json_aws(e, msg="trying to retrieve targets")
    except IndexError:
        # An empty assessmentTargetArns list means the target does not exist.
        target_exists = False

    if state == 'present' and target_exists:
        ansible_dict_tags = boto3_tag_list_to_ansible_dict(tags)
        ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict(
            existing_resource_group_tags
        )
        tags_to_add, tags_to_remove = compare_aws_tags(
            ansible_dict_tags,
            ansible_dict_existing_tags
        )
        if not (tags_to_add or tags_to_remove):
            # Tags already match the request; nothing to change.
            existing_target.update({'tags': ansible_dict_existing_tags})
            module.exit_json(changed=False, **existing_target)
        else:
            try:
                # Resource groups cannot be edited in place: create a new one
                # with the desired tags and repoint the target at it.
                updated_resource_group_arn = client.create_resource_group(
                    resourceGroupTags=tags,
                ).get('resourceGroupArn')

                client.update_assessment_target(
                    assessmentTargetArn=existing_target_arn,
                    assessmentTargetName=name,
                    resourceGroupArn=updated_resource_group_arn,
                )

                updated_target = camel_dict_to_snake_dict(
                    client.describe_assessment_targets(
                        assessmentTargetArns=[existing_target_arn],
                    ).get('assessmentTargets')[0]
                )

                updated_target.update({'tags': ansible_dict_tags})

                # BUG FIX: removed a stray trailing comma that made this
                # statement a one-element tuple expression.
                module.exit_json(changed=True, **updated_target)
            except (
                botocore.exceptions.BotoCoreError,
                botocore.exceptions.ClientError,
            ) as e:
                module.fail_json_aws(e, msg="trying to update target")

    elif state == 'present' and not target_exists:
        try:
            new_resource_group_arn = client.create_resource_group(
                resourceGroupTags=tags,
            ).get('resourceGroupArn')

            new_target_arn = client.create_assessment_target(
                assessmentTargetName=name,
                resourceGroupArn=new_resource_group_arn,
            ).get('assessmentTargetArn')

            new_target = camel_dict_to_snake_dict(
                client.describe_assessment_targets(
                    assessmentTargetArns=[new_target_arn],
                ).get('assessmentTargets')[0]
            )

            new_target.update({'tags': boto3_tag_list_to_ansible_dict(tags)})

            module.exit_json(changed=True, **new_target)
        except (
            botocore.exceptions.BotoCoreError,
            botocore.exceptions.ClientError,
        ) as e:
            module.fail_json_aws(e, msg="trying to create target")

    elif state == 'absent' and target_exists:
        try:
            client.delete_assessment_target(
                assessmentTargetArn=existing_target_arn,
            )
            module.exit_json(changed=True)
        except (
            botocore.exceptions.BotoCoreError,
            botocore.exceptions.ClientError,
        ) as e:
            module.fail_json_aws(e, msg="trying to delete target")

    elif state == 'absent' and not target_exists:
        module.exit_json(changed=False)


if __name__ == '__main__':
    main()

File diff suppressed because it is too large Load Diff

@ -1,433 +0,0 @@
#!/usr/bin/python
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_kms_info
short_description: Gather information about AWS KMS keys
description:
- Gather information about AWS KMS keys including tags and grants
- This module was called C(aws_kms_facts) before Ansible 2.9. The usage did not change.
version_added: "2.5"
author: "Will Thames (@willthames)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
The filters aren't natively supported by boto3, but are supported to provide similar
functionality to other modules. Standard tag filters (C(tag-key), C(tag-value) and
C(tag:tagName)) are available, as are C(key-id) and C(alias)
type: dict
pending_deletion:
description: Whether to get full details (tags, grants etc.) of keys pending deletion
default: False
type: bool
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all KMS keys
- aws_kms_info:
# Gather information about all keys with a Name tag
- aws_kms_info:
filters:
tag-key: Name
# Gather information about all keys with a specific name
- aws_kms_info:
filters:
"tag:Name": Example
'''
RETURN = '''
keys:
description: list of keys
type: complex
returned: always
contains:
key_id:
description: ID of key
type: str
returned: always
sample: abcd1234-abcd-1234-5678-ef1234567890
key_arn:
description: ARN of key
type: str
returned: always
sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
key_state:
description: The state of the key
type: str
returned: always
sample: PendingDeletion
key_usage:
description: The cryptographic operations for which you can use the key.
type: str
returned: always
sample: ENCRYPT_DECRYPT
origin:
description:
The source of the key's key material. When this value is C(AWS_KMS),
AWS KMS created the key material. When this value is C(EXTERNAL), the
key material was imported or the CMK lacks key material.
type: str
returned: always
sample: AWS_KMS
aws_account_id:
description: The AWS Account ID that the key belongs to
type: str
returned: always
sample: 1234567890123
creation_date:
description: Date of creation of the key
type: str
returned: always
sample: "2017-04-18T15:12:08.551000+10:00"
description:
description: Description of the key
type: str
returned: always
sample: "My Key for Protecting important stuff"
enabled:
      description: Whether the key is enabled. True if C(KeyState) is C(Enabled).
      type: bool
returned: always
sample: false
enable_key_rotation:
      description: Whether automatic yearly key rotation is enabled.
type: bool
returned: always
sample: false
aliases:
description: list of aliases associated with the key
type: list
returned: always
sample:
- aws/acm
- aws/ebs
tags:
description: dictionary of tags applied to the key. Empty when access is denied even if there are tags.
type: dict
returned: always
sample:
Name: myKey
Purpose: protecting_stuff
policies:
description: list of policy documents for the keys. Empty when access is denied even if there are policies.
type: list
returned: always
sample:
Version: "2012-10-17"
Id: "auto-ebs-2"
Statement:
- Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
Effect: "Allow"
Principal:
AWS: "*"
Action:
- "kms:Encrypt"
- "kms:Decrypt"
- "kms:ReEncrypt*"
- "kms:GenerateDataKey*"
- "kms:CreateGrant"
- "kms:DescribeKey"
Resource: "*"
Condition:
StringEquals:
kms:CallerAccount: "111111111111"
kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
- Sid: "Allow direct access to key metadata to the account"
Effect: "Allow"
Principal:
AWS: "arn:aws:iam::111111111111:root"
Action:
- "kms:Describe*"
- "kms:Get*"
- "kms:List*"
- "kms:RevokeGrant"
Resource: "*"
grants:
description: list of grants associated with a key
type: complex
returned: always
contains:
constraints:
description: Constraints on the encryption context that the grant allows.
See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details
type: dict
returned: always
sample:
encryption_context_equals:
"aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz"
creation_date:
description: Date of creation of the grant
type: str
returned: always
sample: "2017-04-18T15:12:08+10:00"
grant_id:
description: The unique ID for the grant
type: str
returned: always
sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234
grantee_principal:
description: The principal that receives the grant's permissions
type: str
returned: always
sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
issuing_account:
description: The AWS account under which the grant was issued
type: str
returned: always
sample: arn:aws:iam::01234567890:root
key_id:
description: The key ARN to which the grant applies.
type: str
returned: always
sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
name:
description: The friendly name that identifies the grant
type: str
returned: always
sample: xyz
operations:
description: The list of operations permitted by the grant
type: list
returned: always
sample:
- Decrypt
- RetireGrant
retiring_principal:
description: The principal that can retire the grant
type: str
returned: always
sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, HAS_BOTO3
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict
import traceback
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
# Caching lookup for aliases
_aliases = dict()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_keys_with_backoff(connection):
    # Page through list_keys and return the merged full result
    # (retried up to 5 times, 5s initial delay, 2x backoff).
    paginator = connection.get_paginator('list_keys')
    return paginator.paginate().build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_aliases_with_backoff(connection):
    # Page through list_aliases and return the merged full result.
    paginator = connection.get_paginator('list_aliases')
    return paginator.paginate().build_full_result()
def get_kms_aliases_lookup(connection):
    """Build (once) and return the module-level map of key id -> alias names."""
    if not _aliases:
        for alias_entry in get_kms_aliases_with_backoff(connection)['Aliases']:
            # Not all aliases are actually associated with a key
            if 'TargetKeyId' in alias_entry:
                target = alias_entry['TargetKeyId']
                # strip off the leading 'alias/' prefix before recording
                _aliases.setdefault(target, []).append(alias_entry['AliasName'][6:])
    return _aliases
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_tags_with_backoff(connection, key_id, **kwargs):
    # Single (retried) call; pagination is handled by the caller since
    # list_resource_tags has no paginator.
    return connection.list_resource_tags(KeyId=key_id, **kwargs)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_grants_with_backoff(connection, key_id, **kwargs):
    # Page through list_grants for key_id, forwarding optional grant tokens.
    params = dict(KeyId=key_id)
    if kwargs.get('tokens'):
        params['GrantTokens'] = kwargs['tokens']
    paginator = connection.get_paginator('list_grants')
    return paginator.paginate(**params).build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_metadata_with_backoff(connection, key_id):
    # Retried describe_key for one key id.
    return connection.describe_key(KeyId=key_id)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_key_policies_with_backoff(connection, key_id):
    # Page through list_key_policies and return the merged full result.
    paginator = connection.get_paginator('list_key_policies')
    return paginator.paginate(KeyId=key_id).build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_key_policy_with_backoff(connection, key_id, policy_name):
    # Retried fetch of a single named policy document for key_id.
    return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_enable_key_rotation_with_backoff(connection, key_id):
    # Return the KeyRotationEnabled flag for key_id (None if absent).
    current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
    return current_rotation_status.get('KeyRotationEnabled')
def get_kms_tags(connection, module, key_id):
    """
    Collect all tags on key_id, following NextMarker pagination manually
    (list_resource_tags does not have a paginator).

    AccessDenied is tolerated: the tags gathered so far are returned.
    """
    kwargs = {}
    collected = []
    while True:
        page = {}
        try:
            page = get_kms_tags_with_backoff(connection, key_id, **kwargs)
            collected.extend(page['Tags'])
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] != 'AccessDeniedException':
                module.fail_json(msg="Failed to obtain key tags",
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
        marker = page.get('NextMarker')
        if not marker:
            break
        kwargs['Marker'] = marker
    return collected
def get_kms_policies(connection, module, key_id):
    # Return every key policy document attached to key_id.
    # AccessDenied is tolerated and yields an empty list; any other
    # ClientError fails the module.
    try:
        policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
        return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for
                policy in policies]
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != 'AccessDeniedException':
            module.fail_json(msg="Failed to obtain key policies",
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
        else:
            return []
def key_matches_filter(key, filtr):
    """
    Check a single (name, value) filter against one key-details dict.

    :param key: snake_cased key details (must carry 'key_id', 'tags', 'aliases')
    :param filtr: tuple of (filter name, filter value)
    :return: True when the key matches the filter, False otherwise
    """
    filter_name, filter_value = filtr
    if filter_name == 'key-id':
        return filter_value == key['key_id']
    if filter_name == 'tag-key':
        return filter_value in key['tags']
    if filter_name == 'tag-value':
        return filter_value in key['tags'].values()
    if filter_name == 'alias':
        return filter_value in key['aliases']
    if filter_name.startswith('tag:'):
        # BUG FIX: use .get() so a key that simply lacks the tag fails to
        # match instead of raising KeyError.
        return key['tags'].get(filter_name[4:]) == filter_value
    # Unknown filter names match nothing.
    return False
def key_matches_filters(key, filters):
    """Return True when the key satisfies every filter (vacuously true if none)."""
    if not filters:
        return True
    return all([key_matches_filter(key, filter_item) for filter_item in filters.items()])
def get_key_details(connection, module, key_id, tokens=None):
    # Assemble the full snake_cased description of one KMS key: metadata,
    # aliases, rotation status, and (unless pending_deletion is set)
    # grants, tags and policies.
    if not tokens:
        tokens = []
    try:
        result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to obtain key metadata",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    # Expose the ARN under 'KeyArn' rather than boto's bare 'Arn'.
    result['KeyArn'] = result.pop('Arn')
    try:
        aliases = get_kms_aliases_lookup(connection)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to obtain aliases",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    result['aliases'] = aliases.get(result['KeyId'], [])
    result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id)
    # NOTE(review): when 'pending_deletion' is set, the grants/tags/policies
    # lookups below are skipped for every key — confirm this short-circuit
    # matches the option's documented meaning.
    if module.params.get('pending_deletion'):
        return camel_dict_to_snake_dict(result)
    try:
        result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants']
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to obtain key grants",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    tags = get_kms_tags(connection, module, key_id)
    # Convert to snake_case BEFORE attaching tags/policies so their inner
    # keys are preserved as-is.
    result = camel_dict_to_snake_dict(result)
    result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue')
    result['policies'] = get_kms_policies(connection, module, key_id)
    return result
def get_kms_info(connection, module):
    # List every key in the account/region and expand each to full details.
    try:
        keys = get_kms_keys_with_backoff(connection)['Keys']
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to obtain keys",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    return [get_key_details(connection, module, key['KeyId']) for key in keys]
def main():
    """Gather details of AWS KMS keys and exit with those matching the filters."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        filters=dict(type='dict'),
        pending_deletion=dict(type='bool', default=False)
    ))

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    if module._name == 'aws_kms_facts':
        module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", version='2.13')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="region must be specified")
    connection = boto3_conn(module, conn_type='client', resource='kms', region=region, endpoint=ec2_url, **aws_connect_params)

    filters = module.params['filters']
    matching_keys = [key for key in get_kms_info(connection, module)
                     if key_matches_filters(key, filters)]
    module.exit_json(keys=matching_keys)


if __name__ == '__main__':
    main()

@ -1,96 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = '''
module: aws_region_info
short_description: Gather information about AWS regions.
description:
- Gather information about AWS regions.
- This module was called C(aws_region_facts) before Ansible 2.9. The usage did not change.
version_added: '2.5'
author: 'Henrique Rodrigues (@Sodki)'
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for
possible filters. Filter names and values are case sensitive. You can also use underscores
instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
default: {}
type: dict
extends_documentation_fragment:
- aws
- ec2
requirements: [botocore, boto3]
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all regions
- aws_region_info:
# Gather information about a single region
- aws_region_info:
filters:
region-name: eu-west-1
'''
RETURN = '''
regions:
returned: on success
description: >
Regions that match the provided filters. Each element consists of a dict with all the information related
to that region.
type: list
sample: "[{
'endpoint': 'ec2.us-west-1.amazonaws.com',
'region_name': 'us-west-1'
}]"
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # Handled by AnsibleAWSModule
def main():
    """Describe EC2 regions, optionally narrowed by user-supplied filters."""
    argument_spec = dict(
        filters=dict(default={}, type='dict')
    )

    module = AnsibleAWSModule(argument_spec=argument_spec)
    if module._name == 'aws_region_facts':
        module.deprecate("The 'aws_region_facts' module has been renamed to 'aws_region_info'", version='2.13')

    connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())

    # Underscores are accepted in filter keys as a convenience; AWS expects dashes.
    normalized_filters = dict((key.replace('_', '-'), value)
                              for key, value in module.params.get('filters').items())

    try:
        regions = connection.describe_regions(
            Filters=ansible_dict_to_boto3_filter_list(normalized_filters)
        )
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to describe regions.")

    module.exit_json(regions=[camel_dict_to_snake_dict(region) for region in regions['Regions']])


if __name__ == '__main__':
    main()

@ -1,119 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_s3_bucket_info
short_description: Lists S3 buckets in AWS
requirements:
- boto3 >= 1.4.4
- python >= 2.6
description:
- Lists S3 buckets in AWS
- This module was called C(aws_s3_bucket_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(aws_s3_bucket_info) module no longer returns C(ansible_facts)!
version_added: "2.4"
author: "Gerben Geijteman (@hyperized)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Note: Only AWS S3 is currently supported
# Lists all s3 buckets
- aws_s3_bucket_info:
register: result
- name: List buckets
debug:
msg: "{{ result['buckets'] }}"
'''
RETURN = '''
buckets:
description: "List of buckets"
returned: always
sample:
- creation_date: 2017-07-06 15:05:12 +00:00
name: my_bucket
type: list
'''
import traceback
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, HAS_BOTO3, camel_dict_to_snake_dict,
get_aws_connection_info)
def get_bucket_list(module, connection):
    """
    Return the snake_cased list of buckets from S3 list_buckets.

    :param module: Ansible module (used only to report failures)
    :param connection: boto3 S3 client
    :return: list of bucket dicts
    """
    try:
        response = connection.list_buckets()
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    return camel_dict_to_snake_dict(response)['buckets']
def main():
    """
    List S3 buckets and exit with the result (as facts under the legacy name).
    """
    # Including ec2 argument spec
    module = AnsibleModule(argument_spec=ec2_argument_spec(), supports_check_mode=True)

    is_old_facts = module._name == 'aws_s3_bucket_facts'
    if is_old_facts:
        module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', "
                         "and the renamed one no longer returns ansible_facts", version='2.13')

    # Verify Boto3 is used
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    # Set up connection
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=HAS_BOTO3)
    connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url,
                            **aws_connect_params)

    # Gather results
    result = {'buckets': get_bucket_list(module, connection)}

    # Send exit
    if is_old_facts:
        module.exit_json(msg="Retrieved s3 facts.", ansible_facts=result)
    else:
        module.exit_json(msg="Retrieved s3 info.", **result)


if __name__ == '__main__':
    main()

@ -1,168 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_s3_cors
short_description: Manage CORS for S3 buckets in AWS
description:
- Manage CORS for S3 buckets in AWS
version_added: "2.5"
author: "Oyvind Saltvik (@fivethreeo)"
options:
name:
description:
- Name of the s3 bucket
required: true
type: str
rules:
description:
- Cors rules to put on the s3 bucket
type: list
state:
description:
- Create or remove cors on the s3 bucket
required: true
choices: [ 'present', 'absent' ]
type: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a simple cors for s3 bucket
- aws_s3_cors:
name: mys3bucket
state: present
rules:
- allowed_origins:
- http://www.example.com/
allowed_methods:
- GET
- POST
allowed_headers:
- Authorization
expose_headers:
- x-amz-server-side-encryption
- x-amz-request-id
max_age_seconds: 30000
# Remove cors for s3 bucket
- aws_s3_cors:
name: mys3bucket
state: absent
'''
RETURN = '''
changed:
description: check to see if a change was made to the rules
returned: always
type: bool
sample: true
name:
description: name of bucket
returned: always
type: str
sample: 'bucket-name'
rules:
description: list of current rules
returned: always
type: list
sample: [
{
"allowed_headers": [
"Authorization"
],
"allowed_methods": [
"GET"
],
"allowed_origins": [
"*"
],
"max_age_seconds": 30000
}
]
'''
try:
from botocore.exceptions import ClientError, BotoCoreError
except Exception:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, compare_policies
def create_or_update_bucket_cors(connection, module):
    """
    Ensure the bucket's CORS configuration matches the requested rules.

    :param connection: boto3 S3 client
    :param module: Ansible module with 'name' and 'rules' parameters
    """
    name = module.params.get("name")
    # BUG FIX: params always contains a 'rules' key (possibly None), so the
    # .get() default never fired; coerce None to an empty rule list so the
    # camelization below always receives a list.
    rules = module.params.get("rules") or []
    changed = False

    try:
        current_camel_rules = connection.get_bucket_cors(Bucket=name)['CORSRules']
    except ClientError:
        # No existing CORS configuration on the bucket.
        current_camel_rules = []

    new_camel_rules = snake_dict_to_camel_dict(rules, capitalize_first=True)
    # compare_policies() takes two dicts and makes them hashable for comparison
    if compare_policies(new_camel_rules, current_camel_rules):
        changed = True

    if changed:
        try:
            connection.put_bucket_cors(Bucket=name, CORSConfiguration={'CORSRules': new_camel_rules})
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to update CORS for bucket {0}".format(name))

    module.exit_json(changed=changed, name=name, rules=rules)
def destroy_bucket_cors(connection, module):
    """
    Delete the CORS configuration from the named bucket.

    :param connection: boto3 S3 client
    :param module: Ansible module with a 'name' parameter
    """
    name = module.params.get("name")
    changed = False
    try:
        # IMPROVEMENT: dropped the unused 'cors' local that captured the
        # delete_bucket_cors response.
        connection.delete_bucket_cors(Bucket=name)
        changed = True
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to delete CORS for bucket {0}".format(name))
    module.exit_json(changed=changed)
def main():
    """Dispatch CORS management based on the 'state' parameter."""
    argument_spec = dict(
        name=dict(required=True, type='str'),
        rules=dict(type='list'),
        state=dict(type='str', choices=['present', 'absent'], required=True)
    )

    module = AnsibleAWSModule(argument_spec=argument_spec)
    client = module.client('s3')
    state = module.params.get("state")

    if state == 'present':
        create_or_update_bucket_cors(client, module)
    elif state == 'absent':
        destroy_bucket_cors(client, module)


if __name__ == '__main__':
    main()

@ -1,404 +0,0 @@
#!/usr/bin/python
# Copyright: (c) 2018, REY Remi
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aws_secret
short_description: Manage secrets stored in AWS Secrets Manager.
description:
- Create, update, and delete secrets stored in AWS Secrets Manager.
author: "REY Remi (@rrey)"
version_added: "2.8"
requirements: [ 'botocore>=1.10.0', 'boto3' ]
options:
name:
description:
- Friendly name for the secret you are creating.
required: true
type: str
state:
description:
      - Whether the secret should exist or not.
default: 'present'
choices: ['present', 'absent']
type: str
recovery_window:
description:
- Only used if state is absent.
- Specifies the number of days that Secrets Manager waits before it can delete the secret.
- If set to 0, the deletion is forced without recovery.
default: 30
type: int
description:
description:
- Specifies a user-provided description of the secret.
type: str
kms_key_id:
description:
- Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be
used to encrypt the `secret_string` or `secret_binary` values in the versions stored in this secret.
type: str
secret_type:
description:
- Specifies the type of data that you want to encrypt.
choices: ['binary', 'string']
default: 'string'
type: str
secret:
description:
- Specifies string or binary data that you want to encrypt and store in the new version of the secret.
default: ""
type: str
tags:
description:
- Specifies a list of user-defined tags that are attached to the secret.
type: dict
rotation_lambda:
description:
- Specifies the ARN of the Lambda function that can rotate the secret.
type: str
rotation_interval:
description:
- Specifies the number of days between automatic scheduled rotations of the secret.
default: 30
type: int
extends_documentation_fragment:
- ec2
- aws
'''
EXAMPLES = r'''
- name: Add string to AWS Secrets Manager
aws_secret:
name: 'test_secret_string'
state: present
secret_type: 'string'
secret: "{{ super_secret_string }}"
- name: remove string from AWS Secrets Manager
aws_secret:
name: 'test_secret_string'
state: absent
secret_type: 'string'
secret: "{{ super_secret_string }}"
'''
RETURN = r'''
secret:
description: The secret information
returned: always
type: complex
contains:
arn:
description: The ARN of the secret
returned: always
type: str
sample: arn:aws:secretsmanager:eu-west-1:xxxxxxxxxx:secret:xxxxxxxxxxx
last_accessed_date:
description: The date the secret was last accessed
returned: always
type: str
sample: '2018-11-20T01:00:00+01:00'
last_changed_date:
description: The date the secret was last modified.
returned: always
type: str
sample: '2018-11-20T12:16:38.433000+01:00'
name:
description: The secret name.
returned: always
type: str
sample: my_secret
rotation_enabled:
description: The secret rotation status.
returned: always
type: bool
sample: false
version_ids_to_stages:
description: Provide the secret version ids and the associated secret stage.
returned: always
type: dict
sample: { "dc1ed59b-6d8e-4450-8b41-536dfe4600a9": [ "AWSCURRENT" ] }
'''
from ansible.module_utils._text import to_bytes
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
class Secret(object):
    """In-memory representation of the secret described by the module arguments.

    Builds the keyword-argument dicts used for the SecretsManager
    create_secret / update_secret API calls.
    """

    def __init__(self, name, secret_type, secret, description="", kms_key_id=None,
                 tags=None, lambda_arn=None, rotation_interval=None):
        self.name = name
        self.description = description
        self.kms_key_id = kms_key_id
        # The SecretsManager API distinguishes binary and string payloads by
        # parameter name (SecretBinary vs SecretString).
        if secret_type == "binary":
            self.secret_type = "SecretBinary"
        else:
            self.secret_type = "SecretString"
        self.secret = secret
        self.tags = tags or {}
        self.rotation_enabled = False
        if lambda_arn:
            self.rotation_enabled = True
            self.rotation_lambda_arn = lambda_arn
            self.rotation_rules = {"AutomaticallyAfterDays": int(rotation_interval)}

    @property
    def create_args(self):
        """Keyword arguments for the SecretsManager create_secret call."""
        args = {
            "Name": self.name
        }
        if self.description:
            args["Description"] = self.description
        if self.kms_key_id:
            args["KmsKeyId"] = self.kms_key_id
        if self.tags:
            args["Tags"] = ansible_dict_to_boto3_tag_list(self.tags)
        args[self.secret_type] = self.secret
        return args

    @property
    def update_args(self):
        """Keyword arguments for the SecretsManager update_secret call."""
        args = {
            "SecretId": self.name
        }
        if self.description:
            args["Description"] = self.description
        if self.kms_key_id:
            args["KmsKeyId"] = self.kms_key_id
        args[self.secret_type] = self.secret
        return args

    @property
    def boto3_tags(self):
        """Tags in boto3 list-of-dicts form.

        BUG FIX: previously referenced the non-existent attribute ``self.Tags``,
        which raised AttributeError whenever this property was accessed.
        """
        return ansible_dict_to_boto3_tag_list(self.tags)

    def as_dict(self):
        """Return a CamelCase dict of this secret, excluding tags."""
        # BUG FIX: copy __dict__ before popping — the previous code popped
        # "tags" from self.__dict__ directly, silently deleting the attribute
        # from the instance as a side effect.
        result = dict(self.__dict__)
        result.pop("tags")
        return snake_dict_to_camel_dict(result)
class SecretsManagerInterface(object):
    """Thin wrapper around the SecretsManager boto3 client.

    All methods fail the module on API errors; mutating methods short-circuit
    with ``exit_json(changed=True)`` when running in check mode, before any
    AWS call is made.
    """

    def __init__(self, module):
        self.module = module
        self.client = self.module.client('secretsmanager')

    def get_secret(self, name):
        """Return the describe_secret response for ``name``, or None if absent."""
        try:
            secret = self.client.describe_secret(SecretId=name)
        except self.client.exceptions.ResourceNotFoundException:
            secret = None
        except Exception as e:
            self.module.fail_json_aws(e, msg="Failed to describe secret")
        return secret

    def create_secret(self, secret):
        """Create ``secret`` and, when rotation is configured, enable it."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)
        try:
            created_secret = self.client.create_secret(**secret.create_args)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to create secret")
        if secret.rotation_enabled:
            response = self.update_rotation(secret)
            created_secret["VersionId"] = response.get("VersionId")
        return created_secret

    def update_secret(self, secret):
        """Update an existing secret's description, KMS key and value."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)
        try:
            response = self.client.update_secret(**secret.update_args)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to update secret")
        return response

    def restore_secret(self, name):
        """Cancel a pending deletion of the secret ``name``."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)
        try:
            response = self.client.restore_secret(SecretId=name)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to restore secret")
        return response

    def delete_secret(self, name, recovery_window):
        """Delete the secret; a recovery_window of 0 forces immediate removal."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)
        try:
            if recovery_window == 0:
                response = self.client.delete_secret(SecretId=name, ForceDeleteWithoutRecovery=True)
            else:
                response = self.client.delete_secret(SecretId=name, RecoveryWindowInDays=recovery_window)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to delete secret")
        return response

    def update_rotation(self, secret):
        """Enable rotation with the secret's lambda/rules, or cancel rotation."""
        if secret.rotation_enabled:
            try:
                response = self.client.rotate_secret(
                    SecretId=secret.name,
                    RotationLambdaARN=secret.rotation_lambda_arn,
                    RotationRules=secret.rotation_rules)
            except (BotoCoreError, ClientError) as e:
                # BUG FIX: message previously read "Failed to rotate secret secret"
                self.module.fail_json_aws(e, msg="Failed to rotate secret")
        else:
            try:
                response = self.client.cancel_rotate_secret(SecretId=secret.name)
            except (BotoCoreError, ClientError) as e:
                self.module.fail_json_aws(e, msg="Failed to cancel rotation")
        return response

    def tag_secret(self, secret_name, tags):
        """Attach ``tags`` (boto3 tag list) to the secret."""
        try:
            self.client.tag_resource(SecretId=secret_name, Tags=tags)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to add tag(s) to secret")

    def untag_secret(self, secret_name, tag_keys):
        """Remove the given tag keys from the secret."""
        try:
            self.client.untag_resource(SecretId=secret_name, TagKeys=tag_keys)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to remove tag(s) from secret")

    def secrets_match(self, desired_secret, current_secret):
        """Compare secrets except tags and rotation

        Args:
            desired_secret: camel dict representation of the desired secret state.
            current_secret: secret reference as returned by the secretsmanager api.
        Returns: bool
        """
        if desired_secret.description != current_secret.get("Description", ""):
            return False
        if desired_secret.kms_key_id != current_secret.get("KmsKeyId"):
            return False
        # Value comparison requires a separate API call; describe_secret does
        # not return the secret payload.
        current_secret_value = self.client.get_secret_value(SecretId=current_secret.get("Name"))
        if desired_secret.secret_type == 'SecretBinary':
            desired_value = to_bytes(desired_secret.secret)
        else:
            desired_value = desired_secret.secret
        if desired_value != current_secret_value.get(desired_secret.secret_type):
            return False
        return True
def rotation_match(desired_secret, current_secret):
    """Check whether the desired rotation configuration matches the current one.

    Args:
        desired_secret: camel dict representation of the desired secret state.
        current_secret: secret reference as returned by the secretsmanager api.
    Returns: bool
    """
    enabled = desired_secret.rotation_enabled
    if enabled != current_secret.get("RotationEnabled", False):
        return False
    if not enabled:
        # Rotation disabled on both sides; nothing more to compare.
        return True
    same_lambda = desired_secret.rotation_lambda_arn == current_secret.get("RotationLambdaARN")
    same_rules = desired_secret.rotation_rules == current_secret.get("RotationRules")
    return same_lambda and same_rules
def main():
    """Entry point: create, update, restore or delete an AWS Secrets Manager secret."""
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(required=True),
            'state': dict(choices=['present', 'absent'], default='present'),
            'description': dict(default=""),
            'kms_key_id': dict(),
            'secret_type': dict(choices=['binary', 'string'], default="string"),
            # no_log keeps the secret value out of Ansible logs/debug output
            'secret': dict(default="", no_log=True),
            'tags': dict(type='dict', default={}),
            'rotation_lambda': dict(),
            'rotation_interval': dict(type='int', default=30),
            'recovery_window': dict(type='int', default=30),
        },
        supports_check_mode=True,
    )

    changed = False
    state = module.params.get('state')
    secrets_mgr = SecretsManagerInterface(module)
    recovery_window = module.params.get('recovery_window')
    secret = Secret(
        module.params.get('name'),
        module.params.get('secret_type'),
        module.params.get('secret'),
        description=module.params.get('description'),
        kms_key_id=module.params.get('kms_key_id'),
        tags=module.params.get('tags'),
        lambda_arn=module.params.get('rotation_lambda'),
        rotation_interval=module.params.get('rotation_interval')
    )
    current_secret = secrets_mgr.get_secret(secret.name)

    if state == 'absent':
        if current_secret:
            if not current_secret.get("DeletedDate"):
                result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window))
                changed = True
            elif recovery_window == 0:
                # Already scheduled for deletion; force immediate removal.
                result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window))
                changed = True
            else:
                # BUG FIX: previously 'result' was never assigned on this path
                # (secret already scheduled for deletion, non-zero recovery
                # window), causing a NameError at exit_json below.
                result = camel_dict_to_snake_dict(current_secret)
        else:
            result = "secret does not exist"

    if state == 'present':
        if current_secret is None:
            result = secrets_mgr.create_secret(secret)
            changed = True
        else:
            if current_secret.get("DeletedDate"):
                secrets_mgr.restore_secret(secret.name)
                changed = True
            if not secrets_mgr.secrets_match(secret, current_secret):
                result = secrets_mgr.update_secret(secret)
                changed = True
            if not rotation_match(secret, current_secret):
                result = secrets_mgr.update_rotation(secret)
                changed = True
            current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', []))
            tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags)
            if tags_to_add:
                secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add))
                changed = True
            if tags_to_remove:
                secrets_mgr.untag_secret(secret.name, tags_to_remove)
                changed = True
        # Re-describe so the returned secret reflects the final state.
        # BUG FIX: this lookup now runs only for state=present — previously it
        # also ran after deletion, clobbering the delete result and crashing
        # (get_secret returns None) when the secret no longer existed.
        result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name))
        result.pop("response_metadata", None)

    module.exit_json(changed=changed, secret=result)


if __name__ == '__main__':
    main()

@ -1,546 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_ses_identity
short_description: Manages SES email and domain identity
description:
- This module allows the user to manage verified email and domain identity for SES.
- This covers verifying and removing identities as well as setting up complaint, bounce
and delivery notification settings.
version_added: "2.5"
author: Ed Costello (@orthanc)
options:
identity:
description:
- This is the email address or domain to verify / delete.
- If this contains an '@' then it will be considered an email. Otherwise it will be considered a domain.
required: true
type: str
state:
description: Whether to create(or update) or delete the identity.
default: present
choices: [ 'present', 'absent' ]
type: str
bounce_notifications:
description:
- Setup the SNS topic used to report bounce notifications.
- If omitted, bounce notifications will not be delivered to a SNS topic.
- If bounce notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to a SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
type: dict
complaint_notifications:
description:
- Setup the SNS topic used to report complaint notifications.
- If omitted, complaint notifications will not be delivered to a SNS topic.
- If complaint notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to a SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
type: dict
delivery_notifications:
description:
- Setup the SNS topic used to report delivery notifications.
- If omitted, delivery notifications will not be delivered to a SNS topic.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to a SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
type: dict
feedback_forwarding:
description:
- Whether or not to enable feedback forwarding.
- This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics.
type: 'bool'
default: True
requirements: [ 'botocore', 'boto3' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Ensure example@example.com email identity exists
aws_ses_identity:
identity: example@example.com
state: present
- name: Delete example@example.com email identity
aws_ses_identity:
email: example@example.com
state: absent
- name: Ensure example.com domain identity exists
aws_ses_identity:
identity: example.com
state: present
# Create an SNS topic and send bounce and complaint notifications to it
# instead of emailing the identity owner
- name: Ensure complaints-topic exists
sns_topic:
name: "complaints-topic"
state: present
purge_subscriptions: False
register: topic_info
- name: Deliver feedback to topic instead of owner email
aws_ses_identity:
identity: example@example.com
state: present
complaint_notifications:
topic: "{{ topic_info.sns_arn }}"
include_headers: True
bounce_notifications:
topic: "{{ topic_info.sns_arn }}"
include_headers: False
feedback_forwarding: False
# Create an SNS topic for delivery notifications and leave complaints
# Being forwarded to the identity owner email
- name: Ensure delivery-notifications-topic exists
sns_topic:
name: "delivery-notifications-topic"
state: present
purge_subscriptions: False
register: topic_info
- name: Delivery notifications to topic
aws_ses_identity:
identity: example@example.com
state: present
delivery_notifications:
topic: "{{ topic_info.sns_arn }}"
'''
RETURN = '''
identity:
description: The identity being modified.
returned: success
type: str
sample: example@example.com
identity_arn:
description: The arn of the identity being modified.
returned: success
type: str
sample: arn:aws:ses:us-east-1:12345678:identity/example@example.com
verification_attributes:
description: The verification information for the identity.
returned: success
type: complex
sample: {
"verification_status": "Pending",
"verification_token": "...."
}
contains:
verification_status:
description: The verification status of the identity.
type: str
sample: "Pending"
verification_token:
description: The verification token for a domain identity.
type: str
notification_attributes:
description: The notification setup for the identity.
returned: success
type: complex
sample: {
"bounce_topic": "arn:aws:sns:....",
"complaint_topic": "arn:aws:sns:....",
"delivery_topic": "arn:aws:sns:....",
"forwarding_enabled": false,
"headers_in_bounce_notifications_enabled": true,
"headers_in_complaint_notifications_enabled": true,
"headers_in_delivery_notifications_enabled": true
}
contains:
bounce_topic:
description:
- The ARN of the topic bounce notifications are delivered to.
- Omitted if bounce notifications are not delivered to a topic.
type: str
complaint_topic:
description:
- The ARN of the topic complaint notifications are delivered to.
- Omitted if complaint notifications are not delivered to a topic.
type: str
delivery_topic:
description:
- The ARN of the topic delivery notifications are delivered to.
- Omitted if delivery notifications are not delivered to a topic.
type: str
forwarding_enabled:
description: Whether or not feedback forwarding is enabled.
type: bool
headers_in_bounce_notifications_enabled:
description: Whether or not headers are included in messages delivered to the bounce topic.
type: bool
headers_in_complaint_notifications_enabled:
description: Whether or not headers are included in messages delivered to the complaint topic.
type: bool
headers_in_delivery_notifications_enabled:
description: Whether or not headers are included in messages delivered to the delivery topic.
type: bool
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, get_aws_connection_info
import time
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by AnsibleAWSModule
def get_verification_attributes(connection, module, identity, retries=0, retryDelay=10):
    """Fetch the SES verification attributes for a single identity.

    Retries up to ``retries`` extra times, sleeping ``retryDelay`` seconds
    between attempts, because the attributes may not be visible immediately
    after the identity is registered (eventual consistency).

    Returns the attribute dict for ``identity``, or None if it never appears.
    Fails the module on API errors.
    """
    # Unpredictably get_identity_verification_attributes doesn't include the identity even when we've
    # just registered it. Suspect this is an eventual consistency issue on AWS side.
    # Don't want this complexity exposed to users of the module as they'd have to retry to ensure
    # a consistent return from the module.
    # To avoid this we have an internal retry that we use only after registering the identity.
    for attempt in range(0, retries + 1):
        try:
            response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to retrieve identity verification attributes for {identity}'.format(identity=identity))
        identity_verification = response['VerificationAttributes']
        if identity in identity_verification:
            break
        # Identity not visible yet; wait before the next attempt.
        time.sleep(retryDelay)
    if identity not in identity_verification:
        return None
    return identity_verification[identity]
def get_identity_notifications(connection, module, identity, retries=0, retryDelay=10):
    """Fetch the SES notification attributes for a single identity.

    Retries up to ``retries`` extra times, sleeping ``retryDelay`` seconds
    between attempts, to ride out eventual consistency just after the identity
    is registered. Returns the notification attribute dict for ``identity``,
    or None if the identity never shows up in the response.
    """
    # Unpredictably get_identity_notifications doesn't include the notifications when we've
    # just registered the identity.
    # Don't want this complexity exposed to users of the module as they'd have to retry to ensure
    # a consistent return from the module.
    # To avoid this we have an internal retry that we use only when getting the current notification
    # status for return.
    for attempt in range(0, retries + 1):
        try:
            response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to retrieve identity notification attributes for {identity}'.format(identity=identity))
        notification_attributes = response['NotificationAttributes']
        # No clear AWS docs on when this happens, but it appears sometimes identities are not included
        # in the notification attributes when the identity is first registered. Suspect that this is caused by
        # eventual consistency within the AWS services. It's been observed in builds so we need to handle it.
        #
        # When this occurs, just return None and we'll assume no identity notification settings have been changed
        # from the default which is reasonable if this is just eventual consistency on creation.
        # See: https://github.com/ansible/ansible/issues/36065
        if identity in notification_attributes:
            break
        else:
            # Paranoia check for coding errors, we only requested one identity, so if we get a different one
            # something has gone very wrong.
            if len(notification_attributes) != 0:
                module.fail_json(
                    msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format(
                        identity,
                        notification_attributes.keys(),
                    )
                )
            time.sleep(retryDelay)
    if identity not in notification_attributes:
        return None
    return notification_attributes[identity]
def desired_topic(module, notification_type):
    """Return the SNS topic ARN requested for ``notification_type``
    (e.g. 'Bounce'), or None when no topic was configured."""
    settings = module.params.get('{0}_notifications'.format(notification_type.lower()))
    if not settings:
        return None
    return settings.get('topic')
def update_notification_topic(connection, module, identity, identity_notifications, notification_type):
    """Reconcile the SNS topic for one notification type on the identity.

    Compares the currently configured topic (from ``identity_notifications``)
    with the topic requested in the module parameters, and updates SES when
    they differ. Returns True if a change was made (or would be, in check
    mode), False otherwise.
    """
    topic_key = notification_type + 'Topic'
    if identity_notifications is None:
        # If there is no configuration for notifications cannot be being sent to topics
        # hence assume None as the current state.
        current = None
    elif topic_key in identity_notifications:
        current = identity_notifications[topic_key]
    else:
        # If there is information on the notifications setup but no information on the
        # particular notification topic it's pretty safe to assume there's no topic for
        # this notification. AWS API docs suggest this information will always be
        # included but best to be defensive
        current = None
    required = desired_topic(module, notification_type)
    if current != required:
        try:
            if not module.check_mode:
                # NOTE(review): when 'required' is None this passes SnsTopic=None to
                # clear the topic — confirm the SES API accepts an explicit None
                # rather than requiring the parameter to be omitted.
                connection.set_identity_notification_topic(Identity=identity, NotificationType=notification_type, SnsTopic=required, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format(
                identity=identity,
                notification_type=notification_type,
            ))
        return True
    return False
def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type):
    """Reconcile the 'include headers' flag for one notification type.

    Compares the current headers-enabled setting with the requested one
    (``include_headers`` in the corresponding *_notifications parameter,
    defaulting to False) and updates SES when they differ. Returns True if a
    change was made (or would be, in check mode), False otherwise.
    """
    arg_dict = module.params.get(notification_type.lower() + '_notifications')
    header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
    if identity_notifications is None:
        # If there is no configuration for topic notifications, headers cannot be being
        # forwarded, hence assume false.
        current = False
    elif header_key in identity_notifications:
        current = identity_notifications[header_key]
    else:
        # AWS API doc indicates that the headers in fields are optional. Unfortunately
        # it's not clear on what this means. But it's a pretty safe assumption that it means
        # headers are not included since most API consumers would interpret absence as false.
        current = False
    if arg_dict is not None and 'include_headers' in arg_dict:
        required = arg_dict['include_headers']
    else:
        required = False
    if current != required:
        try:
            if not module.check_mode:
                connection.set_identity_headers_in_notifications_enabled(Identity=identity, NotificationType=notification_type, Enabled=required,
                                                                         aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to set identity headers in notification for {identity} {notification_type}'.format(
                identity=identity,
                notification_type=notification_type,
            ))
        return True
    return False
def update_feedback_forwarding(connection, module, identity, identity_notifications):
    """Reconcile the feedback-forwarding flag on the identity.

    Compares the current forwarding state with the 'feedback_forwarding'
    module parameter and updates SES when they differ. Returns True if a
    change was made (or would be, in check mode), False otherwise.
    """
    if identity_notifications is None:
        # AWS requires feedback forwarding to be enabled unless bounces and complaints
        # are being handled by SNS topics. So in the absence of identity_notifications
        # information existing feedback forwarding must be on.
        current = True
    elif 'ForwardingEnabled' in identity_notifications:
        current = identity_notifications['ForwardingEnabled']
    else:
        # If there is information on the notifications setup but no information on the
        # forwarding state it's pretty safe to assume forwarding is off. AWS API docs
        # suggest this information will always be included but best to be defensive
        current = False
    required = module.params.get('feedback_forwarding')
    if current != required:
        try:
            if not module.check_mode:
                connection.set_identity_feedback_forwarding_enabled(Identity=identity, ForwardingEnabled=required, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to set identity feedback forwarding for {identity}'.format(identity=identity))
        return True
    return False
def create_mock_notifications_response(module):
    """Build the notification-attributes dict AWS would return if the requested
    settings were applied; used to produce check-mode output."""
    mock = {"ForwardingEnabled": module.params.get('feedback_forwarding')}
    for kind in ('Bounce', 'Complaint', 'Delivery'):
        settings = module.params.get(kind.lower() + '_notifications') or {}
        if 'topic' in settings:
            mock[kind + 'Topic'] = settings['topic']
        # Header flags default to False when not explicitly requested.
        mock['HeadersIn' + kind + 'NotificationsEnabled'] = settings.get('include_headers', False)
    return mock
def update_identity_notifications(connection, module):
    """Reconcile all notification settings (topics, header flags, feedback
    forwarding) for the identity named in the module parameters.

    Returns a (changed, notification_attributes) tuple; the attributes reflect
    the post-update state (mocked in check mode), or None if they could not be
    loaded.
    """
    identity = module.params.get('identity')
    changed = False
    identity_notifications = get_identity_notifications(connection, module, identity)
    for notification_type in ('Bounce', 'Complaint', 'Delivery'):
        changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type)
        changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type)
    changed |= update_feedback_forwarding(connection, module, identity, identity_notifications)
    # Re-read (or mock, in check mode) only when something changed or the
    # initial read came back empty.
    if changed or identity_notifications is None:
        if module.check_mode:
            identity_notifications = create_mock_notifications_response(module)
        else:
            identity_notifications = get_identity_notifications(connection, module, identity, retries=4)
    return changed, identity_notifications
def validate_params_for_identity_present(module):
    """Fail the module when feedback forwarding is disabled without both
    bounce and complaint SNS topics configured (an AWS requirement)."""
    if module.params.get('feedback_forwarding') is not False:
        return
    bounce_topic = desired_topic(module, 'Bounce')
    complaint_topic = desired_topic(module, 'Complaint')
    if not (bounce_topic and complaint_topic):
        module.fail_json(msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires "
                             "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics")
def create_or_update_identity(connection, module, region, account_id):
    """Ensure the identity exists and its notification settings match the
    module parameters, then exit the module with the resulting state.

    Identities containing '@' are verified as email addresses, otherwise as
    domains. Exits via module.exit_json / fail_json; does not return.
    """
    identity = module.params.get('identity')
    changed = False
    verification_attributes = get_verification_attributes(connection, module, identity)
    if verification_attributes is None:
        # Identity is not registered yet — register it now.
        try:
            if not module.check_mode:
                if '@' in identity:
                    connection.verify_email_identity(EmailAddress=identity, aws_retry=True)
                else:
                    connection.verify_domain_identity(Domain=identity, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to verify identity {identity}'.format(identity=identity))
        if module.check_mode:
            # Mock what AWS would report for a freshly registered identity.
            verification_attributes = {
                "VerificationStatus": "Pending",
            }
        else:
            # Retry the read; attributes may lag the registration call.
            verification_attributes = get_verification_attributes(connection, module, identity, retries=4)
        changed = True
    elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'):
        module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'],
                         verification_attributes=camel_dict_to_snake_dict(verification_attributes))
    if verification_attributes is None:
        module.fail_json(msg='Unable to load identity verification attributes after registering identity.')
    notifications_changed, notification_attributes = update_identity_notifications(connection, module)
    changed |= notifications_changed
    if notification_attributes is None:
        module.fail_json(msg='Unable to load identity notification attributes.')
    identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity
    module.exit_json(
        changed=changed,
        identity=identity,
        identity_arn=identity_arn,
        verification_attributes=camel_dict_to_snake_dict(verification_attributes),
        notification_attributes=camel_dict_to_snake_dict(notification_attributes),
    )
def destroy_identity(connection, module):
    """Delete the identity named in the module parameters if it exists.

    Idempotent: reports changed=False when the identity is already absent.
    Exits via module.exit_json / fail_json; does not return.
    """
    identity = module.params.get('identity')
    changed = False
    # Presence of verification attributes is used as the existence check.
    verification_attributes = get_verification_attributes(connection, module, identity)
    if verification_attributes is not None:
        try:
            if not module.check_mode:
                connection.delete_identity(Identity=identity, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to delete identity {identity}'.format(identity=identity))
        changed = True
    module.exit_json(
        changed=changed,
        identity=identity,
    )
def get_account_id(module):
    """Return the AWS account ID for the current credentials via STS."""
    sts_client = module.client('sts')
    try:
        identity = sts_client.get_caller_identity()
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to retrieve caller identity')
    return identity['Account']
def main():
    """Entry point: validate parameters and create/update or delete the SES identity."""
    module = AnsibleAWSModule(
        argument_spec={
            "identity": dict(required=True, type='str'),
            "state": dict(default='present', choices=['present', 'absent']),
            "bounce_notifications": dict(type='dict'),
            "complaint_notifications": dict(type='dict'),
            "delivery_notifications": dict(type='dict'),
            "feedback_forwarding": dict(default=True, type='bool'),
        },
        supports_check_mode=True,
    )
    # Reject unknown keys inside the *_notifications dicts early, with a
    # clear message, rather than passing junk through to the AWS calls.
    for notification_type in ('bounce', 'complaint', 'delivery'):
        param_name = notification_type + '_notifications'
        arg_dict = module.params.get(param_name)
        if arg_dict:
            extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
            if extra_keys:
                module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers')
    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel that's caused throttling
    # failures so apply a jittered backoff to call SES calls.
    connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
    state = module.params.get("state")
    if state == 'present':
        region = get_aws_connection_info(module, boto3=True)[0]
        account_id = get_account_id(module)
        validate_params_for_identity_present(module)
        create_or_update_identity(connection, module, region, account_id)
    else:
        destroy_identity(connection, module)
if __name__ == '__main__':
    main()

@ -1,201 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_ses_identity_policy
short_description: Manages SES sending authorization policies
description:
- This module allows the user to manage sending authorization policies associated with an SES identity (email or domain).
- SES authorization sending policies can be used to control what actors are able to send email
on behalf of the validated identity and what conditions must be met by the sent emails.
version_added: "2.6"
author: Ed Costello (@orthanc)
options:
identity:
description: |
The SES identity to attach or remove a policy from. This can be either the full ARN or just
the verified email or domain.
required: true
type: str
policy_name:
description: The name used to identify the policy within the scope of the identity it's attached to.
required: true
type: str
policy:
description: A properly formatted JSON sending authorization policy. Required when I(state=present).
type: json
state:
description: Whether to create(or update) or delete the authorization policy on the identity.
default: present
choices: [ 'present', 'absent' ]
type: str
requirements: [ 'botocore', 'boto3' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: add sending authorization policy to domain identity
aws_ses_identity_policy:
identity: example.com
policy_name: ExamplePolicy
policy: "{{ lookup('template', 'policy.json.j2') }}"
state: present
- name: add sending authorization policy to email identity
aws_ses_identity_policy:
identity: example@example.com
policy_name: ExamplePolicy
policy: "{{ lookup('template', 'policy.json.j2') }}"
state: present
- name: add sending authorization policy to identity using ARN
aws_ses_identity_policy:
identity: "arn:aws:ses:us-east-1:12345678:identity/example.com"
policy_name: ExamplePolicy
policy: "{{ lookup('template', 'policy.json.j2') }}"
state: present
- name: remove sending authorization policy
aws_ses_identity_policy:
identity: example.com
policy_name: ExamplePolicy
state: absent
'''
RETURN = '''
policies:
description: A list of all policies present on the identity after the operation.
returned: success
type: list
sample: [ExamplePolicy]
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import compare_policies, AWSRetry
import json
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by AnsibleAWSModule
def get_identity_policy(connection, module, identity, policy_name):
    """Return the JSON policy document named ``policy_name`` attached to
    ``identity``, or None when no such policy exists."""
    try:
        response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to retrieve identity policy {policy}'.format(policy=policy_name))
    # The API returns a name -> document mapping; absent name means no policy.
    return response['Policies'].get(policy_name)
def create_or_update_identity_policy(connection, module):
    """Ensure the requested sending-authorization policy is attached to the identity, then exit_json."""
    identity = module.params.get('identity')
    policy_name = module.params.get('policy_name')
    desired_policy = module.params.get('policy')
    desired_dict = json.loads(desired_policy)

    current_policy = get_identity_policy(connection, module, identity, policy_name)
    current_dict = json.loads(current_policy) if current_policy else None

    # compare_policies is True when the documents differ semantically.
    changed = bool(compare_policies(current_dict, desired_dict))
    if changed and not module.check_mode:
        try:
            connection.put_identity_policy(Identity=identity, PolicyName=policy_name, Policy=desired_policy, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to put identity policy {policy}'.format(policy=policy_name))

    # Load the list of applied policies to include in the response.
    # In principle we should be able to just return the response, but given
    # eventual consistency behaviours in AWS it's plausible that we could
    # end up with a list that doesn't contain the policy we just added.
    # So out of paranoia check for this case and if we're missing the policy
    # just make sure it's present.
    #
    # As a nice side benefit this also means the return is correct in check mode
    try:
        attached = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to list identity policies')
    if policy_name is not None and policy_name not in attached:
        attached = list(attached) + [policy_name]

    module.exit_json(
        changed=changed,
        policies=attached,
    )
def delete_identity_policy(connection, module):
    """Remove the named sending-authorization policy from the identity if present, then exit_json."""
    identity = module.params.get('identity')
    policy_name = module.params.get('policy_name')

    try:
        remaining = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to list identity policies')

    changed = policy_name in remaining
    if changed:
        if not module.check_mode:
            try:
                connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg='Failed to delete identity policy {policy}'.format(policy=policy_name))
        # Reflect the deletion in the reported list (also correct in check mode).
        remaining = list(remaining)
        remaining.remove(policy_name)

    module.exit_json(
        changed=changed,
        policies=remaining,
    )
def main():
    """Module entry point: build the module object and dispatch on state."""
    argument_spec = dict(
        identity=dict(required=True, type='str'),
        state=dict(default='present', choices=['present', 'absent']),
        policy_name=dict(required=True, type='str'),
        policy=dict(type='json', default=None),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[['state', 'present', ['policy']]],
        supports_check_mode=True,
    )

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel that's caused throttling
    # failures so apply a jittered backoff to call SES calls.
    connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    if module.params.get('state') == 'present':
        create_or_update_identity_policy(connection, module)
    else:
        delete_identity_policy(connection, module)
if __name__ == '__main__':
main()

@ -1,254 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017, Ben Tomasik <ben@tomasik.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aws_ses_rule_set
short_description: Manages SES inbound receipt rule sets
description:
- The M(aws_ses_rule_set) module allows you to create, delete, and manage SES receipt rule sets
version_added: 2.8
author:
- "Ben Tomasik (@tomislacker)"
- "Ed Costello (@orthanc)"
requirements: [ boto3, botocore ]
options:
name:
description:
- The name of the receipt rule set.
required: True
type: str
state:
description:
- Whether to create (or update) or destroy the receipt rule set.
required: False
default: present
choices: ["absent", "present"]
type: str
active:
description:
- Whether or not this rule set should be the active rule set. Only has an impact if I(state) is C(present).
- If omitted, the active rule set will not be changed.
- If C(True) then this rule set will be made active and all others inactive.
- if C(False) then this rule set will be deactivated. Be careful with this as you can end up with no active rule set.
type: bool
required: False
force:
description:
- When deleting a rule set, deactivate it first (AWS prevents deletion of the active rule set).
type: bool
required: False
default: False
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
---
- name: Create default rule set and activate it if not already
aws_ses_rule_set:
name: default-rule-set
state: present
active: yes
- name: Create some arbitrary rule set but do not activate it
aws_ses_rule_set:
name: arbitrary-rule-set
state: present
- name: Explicitly deactivate the default rule set leaving no active rule set
aws_ses_rule_set:
name: default-rule-set
state: present
active: no
- name: Remove an arbitrary inactive rule set
aws_ses_rule_set:
name: arbitrary-rule-set
state: absent
- name: Remove a rule set even if we have to first deactivate it to remove it
aws_ses_rule_set:
name: default-rule-set
state: absent
force: yes
"""
RETURN = """
active:
description: if the SES rule set is active
returned: success if I(state) is C(present)
type: bool
sample: true
rule_sets:
description: The list of SES receipt rule sets that exist after any changes.
returned: success
type: list
sample: [{
"created_timestamp": "2018-02-25T01:20:32.690000+00:00",
"name": "default-rule-set"
}]
"""
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
def list_rule_sets(client, module):
    """Return every SES receipt rule set in the account/region (fails the module on API error)."""
    try:
        return client.list_receipt_rule_sets(aws_retry=True)['RuleSets']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Couldn't list rule sets.")
def rule_set_in(name, rule_sets):
    """Return True if a rule set called `name` appears in `rule_sets`.

    `rule_sets` is a sequence of dicts as returned by ListReceiptRuleSets,
    each carrying a 'Name' key.
    """
    # Generator form short-circuits on the first match instead of building
    # an intermediate list like the previous any([s for s in ... if ...]).
    return any(s['Name'] == name for s in rule_sets)
def ruleset_active(client, module, name):
    """Return True when `name` is the currently active receipt rule set."""
    try:
        described = client.describe_active_receipt_rule_set(aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Couldn't get the active rule set.")
    # A response without 'Metadata' means no rule set is active at all.
    if not described or 'Metadata' not in described:
        return False
    return described['Metadata']['Name'] == name
def deactivate_rule_set(client, module):
    """Deactivate whichever receipt rule set is currently active.

    Calling SetActiveReceiptRuleSet with no RuleSetName makes no rule set active.
    """
    try:
        client.set_active_receipt_rule_set(aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Couldn't set active rule set to None.")
def update_active_rule_set(client, module, name, desired_active):
    """Reconcile the active rule set against `desired_active`.

    desired_active: truthy -> make `name` active; falsy (but not None) ->
    deactivate; None -> leave activation untouched. Returns (changed, active).
    """
    active = ruleset_active(client, module, name)

    # None means activation is not being managed at all.
    if desired_active is None:
        return False, active

    changed = False
    if desired_active and not active:
        if not module.check_mode:
            try:
                client.set_active_receipt_rule_set(RuleSetName=name, aws_retry=True)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Couldn't set active rule set to {0}.".format(name))
        changed, active = True, True
    elif not desired_active and active:
        if not module.check_mode:
            deactivate_rule_set(client, module)
        changed, active = True, False
    return changed, active
def create_or_update_rule_set(client, module):
    """Ensure the named receipt rule set exists, reconcile activation, then exit_json."""
    name = module.params.get('name')
    rule_sets = list_rule_sets(client, module)
    changed = False

    if not rule_set_in(name, rule_sets):
        if not module.check_mode:
            try:
                client.create_receipt_rule_set(RuleSetName=name, aws_retry=True)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Couldn't create rule set {0}.".format(name))
        changed = True
        # Include the new rule set in the reported listing (also in check mode).
        rule_sets = list(rule_sets) + [{'Name': name}]

    active_changed, active = update_active_rule_set(client, module, name, module.params.get('active'))

    module.exit_json(
        changed=changed or active_changed,
        active=active,
        rule_sets=[camel_dict_to_snake_dict(rs) for rs in rule_sets],
    )
def remove_rule_set(client, module):
    """Delete the named receipt rule set, deactivating it first when force=true; exit_json."""
    name = module.params.get('name')
    rule_sets = list_rule_sets(client, module)

    if not rule_set_in(name, rule_sets):
        # Already gone; report the unchanged listing.
        module.exit_json(changed=False, rule_sets=[camel_dict_to_snake_dict(rs) for rs in rule_sets])

    active = ruleset_active(client, module, name)
    force = module.params.get('force')
    if active and not force:
        module.fail_json(
            msg="Couldn't delete rule set {0} because it is currently active. Set force=true to delete an active ruleset.".format(name),
            error={
                "code": "CannotDelete",
                "message": "Cannot delete active rule set: {0}".format(name),
            }
        )

    if not module.check_mode:
        # AWS refuses to delete the active rule set, so deactivate it first.
        if active and force:
            deactivate_rule_set(client, module)
        try:
            client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Couldn't delete rule set {0}.".format(name))

    remaining = [rs for rs in rule_sets if rs['Name'] != name]
    module.exit_json(
        changed=True,
        rule_sets=[camel_dict_to_snake_dict(rs) for rs in remaining],
    )
def main():
    """Module entry point."""
    module = AnsibleAWSModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            active=dict(type='bool'),
            force=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel that's caused throttling
    # failures so apply a jittered backoff to call SES calls.
    client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    if module.params.get('state') == 'absent':
        remove_rule_set(client, module)
    else:
        create_or_update_rule_set(client, module)
if __name__ == '__main__':
main()

@ -1,361 +0,0 @@
#!/usr/bin/python
# Copyright: (c) 2018, Loic BLOT (@nerzhul) <loic.blot@unix-experience.fr>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This module is sponsored by E.T.A.I. (www.etai.fr)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_sgw_info
short_description: Fetch AWS Storage Gateway information
description:
- Fetch AWS Storage Gateway information
- This module was called C(aws_sgw_facts) before Ansible 2.9. The usage did not change.
version_added: "2.6"
requirements: [ boto3 ]
author: Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
options:
gather_local_disks:
description:
- Gather local disks attached to the storage gateway.
type: bool
required: false
default: true
gather_tapes:
description:
- Gather tape information for storage gateways in tape mode.
type: bool
required: false
default: true
gather_file_shares:
description:
- Gather file share information for storage gateways in s3 mode.
type: bool
required: false
default: true
gather_volumes:
description:
- Gather volume information for storage gateways in iSCSI (cached & stored) modes.
type: bool
required: false
default: true
extends_documentation_fragment:
- aws
- ec2
'''
RETURN = '''
gateways:
description: list of gateway objects
returned: always
type: complex
contains:
gateway_arn:
description: "Storage Gateway ARN"
returned: always
type: str
sample: "arn:aws:storagegateway:eu-west-1:367709993819:gateway/sgw-9999F888"
gateway_id:
description: "Storage Gateway ID"
returned: always
type: str
sample: "sgw-9999F888"
gateway_name:
description: "Storage Gateway friendly name"
returned: always
type: str
sample: "my-sgw-01"
gateway_operational_state:
description: "Storage Gateway operational state"
returned: always
type: str
sample: "ACTIVE"
gateway_type:
description: "Storage Gateway type"
returned: always
type: str
sample: "FILE_S3"
file_shares:
description: "Storage gateway file shares"
returned: when gateway_type == "FILE_S3"
type: complex
contains:
file_share_arn:
description: "File share ARN"
returned: always
type: str
sample: "arn:aws:storagegateway:eu-west-1:399805793479:share/share-AF999C88"
file_share_id:
description: "File share ID"
returned: always
type: str
sample: "share-AF999C88"
file_share_status:
description: "File share status"
returned: always
type: str
sample: "AVAILABLE"
tapes:
description: "Storage Gateway tapes"
returned: when gateway_type == "VTL"
type: complex
contains:
tape_arn:
description: "Tape ARN"
returned: always
type: str
sample: "arn:aws:storagegateway:eu-west-1:399805793479:tape/tape-AF999C88"
tape_barcode:
          description: "Tape barcode"
returned: always
type: str
sample: "tape-AF999C88"
tape_size_in_bytes:
          description: "Tape size in bytes"
returned: always
type: int
sample: 555887569
tape_status:
          description: "Tape status"
returned: always
type: str
sample: "AVAILABLE"
local_disks:
description: "Storage gateway local disks"
returned: always
type: complex
contains:
disk_allocation_type:
description: "Disk allocation type"
returned: always
type: str
sample: "CACHE STORAGE"
disk_id:
description: "Disk ID on the system"
returned: always
type: str
sample: "pci-0000:00:1f.0"
disk_node:
description: "Disk parent block device"
returned: always
type: str
sample: "/dev/sdb"
disk_path:
description: "Disk path used for the cache"
returned: always
type: str
sample: "/dev/nvme1n1"
disk_size_in_bytes:
description: "Disk size in bytes"
returned: always
type: int
sample: 107374182400
disk_status:
description: "Disk status"
returned: always
type: str
sample: "present"
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: "Get AWS storage gateway information"
aws_sgw_info:
- name: "Get AWS storage gateway information for region eu-west-3"
aws_sgw_info:
region: eu-west-3
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by AnsibleAWSModule
class SGWInformationManager(object):
    """Collects Storage Gateway facts: gateways plus (per gather_* flags) disks, shares, tapes and volumes."""

    def __init__(self, client, module):
        # Boto3 'storagegateway' client and the AnsibleAWSModule instance.
        self.client = client
        self.module = module
        # NOTE(review): 'name' is read from params but no 'name' option is
        # declared in this module's argument spec (see main()), so this is
        # expected to be None — confirm whether it is still needed.
        self.name = self.module.params.get('name')
    def fetch(self):
        """Gather all gateways, enrich each one according to its type and the gather_* flags, then exit_json."""
        gateways = self.list_gateways()
        for gateway in gateways:
            if self.module.params.get('gather_local_disks'):
                self.list_local_disks(gateway)
            # File share gateway
            if gateway["gateway_type"] == "FILE_S3" and self.module.params.get('gather_file_shares'):
                self.list_gateway_file_shares(gateway)
            # Volume tape gateway
            elif gateway["gateway_type"] == "VTL" and self.module.params.get('gather_tapes'):
                self.list_gateway_vtl(gateway)
            # iSCSI gateway
            elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get('gather_volumes'):
                self.list_gateway_volumes(gateway)
        self.module.exit_json(gateways=gateways)
    """
    List all storage gateways for the AWS endpoint.
    """
    def list_gateways(self):
        try:
            # Paginate ListGateways and flatten the pages into one snake_cased list.
            paginator = self.client.get_paginator('list_gateways')
            response = paginator.paginate(
                PaginationConfig={
                    'PageSize': 100,
                }
            ).build_full_result()
            gateways = []
            for gw in response["Gateways"]:
                gateways.append(camel_dict_to_snake_dict(gw))
            return gateways
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Couldn't list storage gateways")
    """
    Read file share objects from AWS API response.
    Drop the gateway_arn attribute from response, as it will be duplicate with parent object.
    """
    @staticmethod
    def _read_gateway_fileshare_response(fileshares, aws_reponse):
        for share in aws_reponse["FileShareInfoList"]:
            share_obj = camel_dict_to_snake_dict(share)
            if "gateway_arn" in share_obj:
                del share_obj["gateway_arn"]
            fileshares.append(share_obj)
        # ListFileShares uses 'NextMarker' as its continuation token.
        return aws_reponse["NextMarker"] if "NextMarker" in aws_reponse else None
    """
    List file shares attached to AWS storage gateway when in S3 mode.
    """
    def list_gateway_file_shares(self, gateway):
        try:
            response = self.client.list_file_shares(
                GatewayARN=gateway["gateway_arn"],
                Limit=100
            )
            gateway["file_shares"] = []
            marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
            # Follow continuation markers until the listing is exhausted.
            while marker is not None:
                response = self.client.list_file_shares(
                    GatewayARN=gateway["gateway_arn"],
                    Marker=marker,
                    Limit=100
                )
                marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Couldn't list gateway file shares")
    """
    List storage gateway local disks
    """
    def list_local_disks(self, gateway):
        try:
            gateway['local_disks'] = [camel_dict_to_snake_dict(disk) for disk in
                                      self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])['Disks']]
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks")
    """
    Read tape objects from AWS API response.
    Drop the gateway_arn attribute from response, as it will be duplicate with parent object.
    """
    @staticmethod
    def _read_gateway_tape_response(tapes, aws_response):
        for tape in aws_response["TapeInfos"]:
            tape_obj = camel_dict_to_snake_dict(tape)
            if "gateway_arn" in tape_obj:
                del tape_obj["gateway_arn"]
            tapes.append(tape_obj)
        # Unlike ListFileShares, ListTapes uses 'Marker' as its continuation token.
        return aws_response["Marker"] if "Marker" in aws_response else None
    """
    List VTL & VTS attached to AWS storage gateway in VTL mode
    """
    def list_gateway_vtl(self, gateway):
        try:
            # NOTE(review): this ListTapes call is not filtered by the gateway's
            # ARN, so every VTL gateway is populated with the account/region-wide
            # tape list — confirm this is intended.
            response = self.client.list_tapes(
                Limit=100
            )
            gateway["tapes"] = []
            marker = self._read_gateway_tape_response(gateway["tapes"], response)
            while marker is not None:
                response = self.client.list_tapes(
                    Marker=marker,
                    Limit=100
                )
                marker = self._read_gateway_tape_response(gateway["tapes"], response)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Couldn't list storage gateway tapes")
    """
    List volumes attached to AWS storage gateway in CACHED or STORAGE mode
    """
    def list_gateway_volumes(self, gateway):
        try:
            # Paginate ListVolumes for this gateway; strip fields that duplicate
            # the parent gateway object.
            paginator = self.client.get_paginator('list_volumes')
            response = paginator.paginate(
                GatewayARN=gateway["gateway_arn"],
                PaginationConfig={
                    'PageSize': 100,
                }
            ).build_full_result()
            gateway["volumes"] = []
            for volume in response["VolumeInfos"]:
                volume_obj = camel_dict_to_snake_dict(volume)
                if "gateway_arn" in volume_obj:
                    del volume_obj["gateway_arn"]
                if "gateway_id" in volume_obj:
                    del volume_obj["gateway_id"]
                gateway["volumes"].append(volume_obj)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Couldn't list storage gateway volumes")
def main():
    """Module entry point."""
    module = AnsibleAWSModule(
        argument_spec=dict(
            gather_local_disks=dict(type='bool', default=True),
            gather_tapes=dict(type='bool', default=True),
            gather_file_shares=dict(type='bool', default=True),
            gather_volumes=dict(type='bool', default=True)
        )
    )

    # Keep the deprecated alias working while warning users to migrate.
    if module._name == 'aws_sgw_facts':
        module.deprecate("The 'aws_sgw_facts' module has been renamed to 'aws_sgw_info'", version='2.13')

    client = module.client('storagegateway')
    if client is None:  # this should never happen
        module.fail_json(msg='Unknown error, failed to create storagegateway client, no information from boto.')

    SGWInformationManager(client, module).fetch()
if __name__ == '__main__':
main()

@ -1,262 +0,0 @@
#!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: aws_ssm_parameter_store
short_description: Manage key-value pairs in aws parameter store.
description:
- Manage key-value pairs in aws parameter store.
version_added: "2.5"
options:
name:
description:
- Parameter key name.
required: true
type: str
description:
description:
- Parameter key description.
required: false
type: str
value:
description:
- Parameter value.
required: false
type: str
state:
description:
- Creates or modifies an existing parameter.
- Deletes a parameter.
required: false
choices: ['present', 'absent']
default: present
type: str
string_type:
description:
- Parameter String type.
required: false
choices: ['String', 'StringList', 'SecureString']
default: String
type: str
decryption:
description:
- Work with SecureString type to get plain text secrets
type: bool
required: false
default: true
key_id:
description:
- AWS KMS key to decrypt the secrets.
- The default key (C(alias/aws/ssm)) is automatically generated the first
time it's requested.
required: false
default: alias/aws/ssm
type: str
overwrite_value:
description:
- Option to overwrite an existing value if it already exists.
required: false
version_added: "2.6"
choices: ['never', 'changed', 'always']
default: changed
type: str
author:
- Nathan Webster (@nathanwebsterdotme)
- Bill Wang (@ozbillwang) <ozbillwang@gmail.com>
- Michael De La Rue (@mikedlr)
extends_documentation_fragment:
- aws
- ec2
requirements: [ botocore, boto3 ]
'''
EXAMPLES = '''
- name: Create or update key/value pair in aws parameter store
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
value: "World"
- name: Delete the key
aws_ssm_parameter_store:
name: "Hello"
state: absent
- name: Create or update secure key/value pair with default kms key (aws/ssm)
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
string_type: "SecureString"
value: "World"
- name: Create or update secure key/value pair with nominated kms key
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
string_type: "SecureString"
key_id: "alias/demo"
value: "World"
- name: Always update a parameter store value and create a new version
aws_ssm_parameter_store:
name: "overwrite_example"
description: "This example will always overwrite the value"
string_type: "String"
value: "Test1234"
overwrite_value: "always"
- name: recommend to use with aws_ssm lookup plugin
debug: msg="{{ lookup('aws_ssm', 'hello') }}"
'''
RETURN = '''
put_parameter:
description: Add one or more parameters to the system.
returned: success
type: dict
delete_parameter:
description: Delete a parameter from the system.
returned: success
type: dict
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
try:
from botocore.exceptions import ClientError
except ImportError:
pass # Handled by AnsibleAWSModule
def update_parameter(client, module, args):
    """Call PutParameter with `args`; return (changed, raw API response)."""
    changed, response = False, {}
    try:
        response = client.put_parameter(**args)
        changed = True
    except ClientError as e:
        module.fail_json_aws(e, msg="setting parameter")
    return changed, response
def create_update_parameter(client, module):
    """Create or update an SSM parameter from the module params.

    Returns (changed, response) where response is the raw PutParameter
    result ({} when no API write was made).
    """
    changed = False
    existing_parameter = None
    response = {}
    # Base PutParameter arguments; Overwrite is False only for overwrite_value='never'.
    args = dict(
        Name=module.params.get('name'),
        Value=module.params.get('value'),
        Type=module.params.get('string_type')
    )
    if (module.params.get('overwrite_value') in ("always", "changed")):
        args.update(Overwrite=True)
    else:
        args.update(Overwrite=False)
    if module.params.get('description'):
        args.update(Description=module.params.get('description'))
    if module.params.get('string_type') == 'SecureString':
        args.update(KeyId=module.params.get('key_id'))
    try:
        # WithDecryption so SecureString values compare against the desired plaintext.
        existing_parameter = client.get_parameter(Name=args['Name'], WithDecryption=True)
    except Exception:
        # NOTE(review): deliberately broad — any failure here (including a
        # missing parameter) is treated as "parameter does not exist yet".
        pass
    if existing_parameter:
        if (module.params.get('overwrite_value') == 'always'):
            (changed, response) = update_parameter(client, module, args)
        elif (module.params.get('overwrite_value') == 'changed'):
            # NOTE(review): these checks are independent ifs, so a parameter
            # differing in more than one aspect triggers multiple PutParameter
            # calls; the last call's (changed, response) wins.
            if existing_parameter['Parameter']['Type'] != args['Type']:
                (changed, response) = update_parameter(client, module, args)
            if existing_parameter['Parameter']['Value'] != args['Value']:
                (changed, response) = update_parameter(client, module, args)
            if args.get('Description'):
                # Description field not available from get_parameter function so get it from describe_parameters
                describe_existing_parameter = None
                try:
                    describe_existing_parameter_paginator = client.get_paginator('describe_parameters')
                    describe_existing_parameter = describe_existing_parameter_paginator.paginate(
                        Filters=[{"Key": "Name", "Values": [args['Name']]}]).build_full_result()
                except ClientError as e:
                    module.fail_json_aws(e, msg="getting description value")
                if describe_existing_parameter['Parameters'][0]['Description'] != args['Description']:
                    (changed, response) = update_parameter(client, module, args)
    else:
        (changed, response) = update_parameter(client, module, args)
    return changed, response
def delete_parameter(client, module):
    """Delete the named parameter.

    Returns (False, {}) when the parameter did not exist, otherwise
    (True, raw DeleteParameter response).
    """
    response = {}
    try:
        response = client.delete_parameter(Name=module.params.get('name'))
    except ClientError as e:
        # A missing parameter is not an error for state=absent: nothing changed.
        if e.response['Error']['Code'] == 'ParameterNotFound':
            return False, {}
        module.fail_json_aws(e, msg="deleting parameter")
    return True, response
def setup_client(module):
    """Build the boto3 SSM client from the module's AWS connection settings."""
    return module.client('ssm')
def setup_module_object():
    """Declare the argument spec and construct the AnsibleAWSModule."""
    return AnsibleAWSModule(
        argument_spec=dict(
            name=dict(required=True),
            description=dict(),
            # no_log because the value may be a secret (SecureString).
            value=dict(required=False, no_log=True),
            state=dict(default='present', choices=['present', 'absent']),
            string_type=dict(default='String', choices=['String', 'StringList', 'SecureString']),
            decryption=dict(default=True, type='bool'),
            key_id=dict(default="alias/aws/ssm"),
            overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']),
        ),
    )
def main():
    """Module entry point: dispatch to create/update or delete based on state."""
    module = setup_module_object()
    client = setup_client(module)
    handlers = {
        "present": create_update_parameter,
        "absent": delete_parameter,
    }
    changed, response = handlers[module.params.get('state')](client, module)
    module.exit_json(changed=changed, response=response)
if __name__ == '__main__':
main()

@ -1,232 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2019, Tom De Keyser (@tdekeyser)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_step_functions_state_machine
short_description: Manage AWS Step Functions state machines
version_added: "2.10"
description:
- Create, update and delete state machines in AWS Step Functions.
- Calling the module in C(state=present) for an existing AWS Step Functions state machine
will attempt to update the state machine definition, IAM Role, or tags with the provided data.
options:
name:
description:
- Name of the state machine
required: true
type: str
definition:
description:
- The Amazon States Language definition of the state machine. See
U(https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) for more
information on the Amazon States Language.
- "This parameter is required when C(state=present)."
type: json
role_arn:
description:
- The ARN of the IAM Role that will be used by the state machine for its executions.
- "This parameter is required when C(state=present)."
type: str
state:
description:
- Desired state for the state machine
default: present
choices: [ present, absent ]
type: str
tags:
description:
- A hash/dictionary of tags to add to the new state machine or to add/remove from an existing one.
type: dict
purge_tags:
description:
- If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter.
If the I(tags) parameter is not set then tags will not be modified.
default: yes
type: bool
extends_documentation_fragment:
- aws
- ec2
author:
- Tom De Keyser (@tdekeyser)
'''
EXAMPLES = '''
# Create a new AWS Step Functions state machine
- name: Setup HelloWorld state machine
aws_step_functions_state_machine:
name: "HelloWorldStateMachine"
definition: "{{ lookup('file','state_machine.json') }}"
role_arn: arn:aws:iam::987654321012:role/service-role/invokeLambdaStepFunctionsRole
tags:
project: helloWorld
# Update an existing state machine
- name: Change IAM Role and tags of HelloWorld state machine
aws_step_functions_state_machine:
name: HelloWorldStateMachine
definition: "{{ lookup('file','state_machine.json') }}"
role_arn: arn:aws:iam::987654321012:role/service-role/anotherStepFunctionsRole
tags:
otherTag: aDifferentTag
# Remove the AWS Step Functions state machine
- name: Delete HelloWorld state machine
aws_step_functions_state_machine:
name: HelloWorldStateMachine
state: absent
'''
RETURN = '''
state_machine_arn:
description: ARN of the AWS Step Functions state machine
type: str
returned: always
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, compare_aws_tags, boto3_tag_list_to_ansible_dict
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
def manage_state_machine(state, sfn_client, module):
    """Dispatch to create/update/remove based on desired state; exits the module."""
    state_machine_arn = get_state_machine_arn(sfn_client, module)

    if state == 'present':
        if state_machine_arn is None:
            create(sfn_client, module)
        else:
            update(state_machine_arn, sfn_client, module)
    elif state == 'absent' and state_machine_arn is not None:
        remove(state_machine_arn, sfn_client, module)

    # Reaching this point means nothing needed to change.
    check_mode(module, msg='State is up-to-date.')
    module.exit_json(changed=False)
def create(sfn_client, module):
    """Create a new state machine from the module parameters; exits the module."""
    check_mode(module, msg='State machine would be created.', changed=True)

    tags = module.params.get('tags')
    sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') if tags else []

    result = sfn_client.create_state_machine(
        name=module.params.get('name'),
        definition=module.params.get('definition'),
        roleArn=module.params.get('role_arn'),
        tags=sfn_tags
    )
    module.exit_json(changed=True, state_machine_arn=result.get('stateMachineArn'))
def remove(state_machine_arn, sfn_client, module):
    """Delete an existing state machine; exits the module."""
    msg = 'State machine would be deleted: {0}'.format(state_machine_arn)
    check_mode(module, msg=msg, changed=True)
    sfn_client.delete_state_machine(stateMachineArn=state_machine_arn)
    module.exit_json(changed=True, state_machine_arn=state_machine_arn)
def update(state_machine_arn, sfn_client, module):
    """Update definition/role and reconcile tags on an existing machine; exits on change."""
    tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module)

    if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove:
        check_mode(module, msg='State machine would be updated: {0}'.format(state_machine_arn), changed=True)

        sfn_client.update_state_machine(
            stateMachineArn=state_machine_arn,
            definition=module.params.get('definition'),
            roleArn=module.params.get('role_arn')
        )
        sfn_client.untag_resource(
            resourceArn=state_machine_arn,
            tagKeys=tags_to_remove
        )
        sfn_client.tag_resource(
            resourceArn=state_machine_arn,
            tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name='key', tag_value_key_name='value')
        )

        module.exit_json(changed=True, state_machine_arn=state_machine_arn)
def compare_tags(state_machine_arn, sfn_client, module):
    """Return (tags_to_add, tags_to_remove) relative to the machine's current tags."""
    desired = module.params.get('tags') or {}
    current = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get('tags')
    return compare_aws_tags(boto3_tag_list_to_ansible_dict(current), desired, module.params.get('purge_tags'))
def params_changed(state_machine_arn, sfn_client, module):
    """Return True when the deployed definition or role ARN differs from the module params."""
    deployed = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn)
    same_definition = deployed.get('definition') == module.params.get('definition')
    same_role = deployed.get('roleArn') == module.params.get('role_arn')
    return not (same_definition and same_role)
def get_state_machine_arn(sfn_client, module):
    """
    Look up the ARN of the state machine whose name matches the module's
    'name' parameter. Returns None when no such state machine exists.
    """
    wanted_name = module.params.get('name')
    listing = sfn_client.list_state_machines(aws_retry=True)
    for machine in listing.get('stateMachines'):
        if machine.get('name') == wanted_name:
            return machine.get('stateMachineArn')
def check_mode(module, msg='', changed=False):
    """Exit early with the given message when running in check mode; no-op otherwise."""
    if not module.check_mode:
        return
    module.exit_json(changed=changed, output=msg)
def main():
    """Module entry point: build the argument spec and dispatch on state."""
    argument_spec = dict(
        name=dict(type='str', required=True),
        definition=dict(type='json'),
        role_arn=dict(type='str'),
        state=dict(choices=['present', 'absent'], default='present'),
        tags=dict(default=None, type='dict'),
        purge_tags=dict(default=True, type='bool'),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])],
        supports_check_mode=True
    )

    # Retry throttled Step Functions calls with jittered exponential backoff.
    client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5))

    try:
        manage_state_machine(module.params.get('state'), client, module)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to manage state machine')


if __name__ == '__main__':
    main()

@ -1,197 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2019, Prasad Katti (@prasadkatti)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_step_functions_state_machine_execution
short_description: Start or stop execution of an AWS Step Functions state machine.
version_added: "2.10"
description:
- Start or stop execution of a state machine in AWS Step Functions.
options:
action:
description: Desired action (start or stop) for a state machine execution.
default: start
choices: [ start, stop ]
type: str
name:
description: Name of the execution.
type: str
execution_input:
description: The JSON input data for the execution.
type: json
default: {}
state_machine_arn:
description: The ARN of the state machine that will be executed.
type: str
execution_arn:
description: The ARN of the execution you wish to stop.
type: str
cause:
description: A detailed explanation of the cause for stopping the execution.
type: str
default: ''
error:
description: The error code of the failure to pass in when stopping the execution.
type: str
default: ''
extends_documentation_fragment:
- aws
- ec2
author:
- Prasad Katti (@prasadkatti)
'''
EXAMPLES = '''
- name: Start an execution of a state machine
aws_step_functions_state_machine_execution:
name: an_execution_name
execution_input: '{ "IsHelloWorldExample": true }'
state_machine_arn: "arn:aws:states:us-west-2:682285639423:stateMachine:HelloWorldStateMachine"
- name: Stop an execution of a state machine
aws_step_functions_state_machine_execution:
action: stop
execution_arn: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
cause: "cause of task failure"
error: "error code of the failure"
'''
RETURN = '''
execution_arn:
description: ARN of the AWS Step Functions state machine execution.
type: str
returned: if action == start and changed == True
sample: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
start_date:
description: The date the execution is started.
type: str
returned: if action == start and changed == True
sample: "2019-11-02T22:39:49.071000-07:00"
stop_date:
description: The date the execution is stopped.
type: str
returned: if action == stop
sample: "2019-11-02T22:39:49.071000-07:00"
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
def start_execution(module, sfn_client):
    '''
    Start a state machine execution, unless an execution with the same name
    already exists (Step Functions treats execution names as idempotency keys).

    If an execution by the provided name exists, client.start_execution is
    not called and the module exits unchanged.
    '''
    state_machine_arn = module.params.get('state_machine_arn')
    name = module.params.get('name')
    execution_input = module.params.get('execution_input')

    try:
        # list_executions is eventually consistent
        page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn)

        for execution in page_iterators.build_full_result()['executions']:
            if name == execution['name']:
                check_mode(module, msg='State machine execution already exists.', changed=False)
                module.exit_json(changed=False)

        check_mode(module, msg='State machine execution would be started.', changed=True)
        res_execution = sfn_client.start_execution(
            stateMachineArn=state_machine_arn,
            name=name,
            input=execution_input
        )
    except (ClientError, BotoCoreError) as e:
        # Only ClientError carries a parsed .response mapping; BotoCoreError
        # has no such attribute, so the original unguarded access raised
        # AttributeError here and masked the real failure.
        if isinstance(e, ClientError) and e.response['Error']['Code'] == 'ExecutionAlreadyExists':
            # list_executions is eventually consistent, so a just-started
            # execution may not have been visible in the scan above.
            module.exit_json(changed=False)
        module.fail_json_aws(e, msg="Failed to start execution.")

    module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution))
def stop_execution(module, sfn_client):
    '''Stop a running execution; a no-op when the execution is not RUNNING.'''
    execution_arn = module.params.get('execution_arn')
    cause = module.params.get('cause')
    error = module.params.get('error')

    try:
        # describe_execution is eventually consistent
        status = sfn_client.describe_execution(executionArn=execution_arn)['status']
        if status != 'RUNNING':
            check_mode(module, msg='State machine execution is not running.', changed=False)
            module.exit_json(changed=False)

        check_mode(module, msg='State machine execution would be stopped.', changed=True)
        res = sfn_client.stop_execution(
            executionArn=execution_arn,
            cause=cause,
            error=error
        )
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to stop execution.")

    module.exit_json(changed=True, **camel_dict_to_snake_dict(res))
def check_mode(module, msg='', changed=False):
    '''In check mode, report the would-be outcome and exit immediately.'''
    if not module.check_mode:
        return
    module.exit_json(changed=changed, output=msg)
def main():
    '''Module entry point: parse arguments and dispatch to start or stop.'''
    argument_spec = dict(
        action=dict(choices=['start', 'stop'], default='start'),
        name=dict(type='str'),
        execution_input=dict(type='json', default={}),
        state_machine_arn=dict(type='str'),
        cause=dict(type='str', default=''),
        error=dict(type='str', default=''),
        execution_arn=dict(type='str')
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[('action', 'start', ['name', 'state_machine_arn']),
                     ('action', 'stop', ['execution_arn']),
                     ],
        supports_check_mode=True
    )

    sfn_client = module.client('stepfunctions')

    if module.params.get('action') == "start":
        start_execution(module, sfn_client)
    else:
        stop_execution(module, sfn_client)


if __name__ == '__main__':
    main()

@ -1,736 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Will Thames
# Copyright (c) 2015 Mike Mochan
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_waf_condition
short_description: Create and delete WAF Conditions
description:
- Read the AWS documentation for WAF
U(https://aws.amazon.com/documentation/waf/)
version_added: "2.5"
author:
- Will Thames (@willthames)
- Mike Mochan (@mmochan)
extends_documentation_fragment:
- aws
- ec2
options:
name:
description: Name of the Web Application Firewall condition to manage.
required: true
type: str
type:
description: The type of matching to perform.
choices:
- byte
- geo
- ip
- regex
- size
- sql
- xss
type: str
required: true
filters:
description:
- A list of the filters against which to match.
- For I(type=byte), valid keys are I(field_to_match), I(position), I(header), I(transformation) and I(target_string).
- For I(type=geo), the only valid key is I(country).
- For I(type=ip), the only valid key is I(ip_address).
- For I(type=regex), valid keys are I(field_to_match), I(transformation) and I(regex_pattern).
- For I(type=size), valid keys are I(field_to_match), I(transformation), I(comparison) and I(size).
- For I(type=sql), valid keys are I(field_to_match) and I(transformation).
- For I(type=xss), valid keys are I(field_to_match) and I(transformation).
- Required when I(state=present).
type: list
elements: dict
suboptions:
field_to_match:
description:
- The field upon which to perform the match.
- Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss).
type: str
choices: ['uri', 'query_string', 'header', 'method', 'body']
position:
description:
- Where in the field the match needs to occur.
- Only valid when I(type=byte).
type: str
choices: ['exactly', 'starts_with', 'ends_with', 'contains', 'contains_word']
header:
description:
- Which specific header should be matched.
- Required when I(field_to_match=header).
- Valid when I(type=byte).
type: str
transformation:
description:
- A transform to apply on the field prior to performing the match.
- Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss).
type: str
choices: ['none', 'compress_white_space', 'html_entity_decode', 'lowercase', 'cmd_line', 'url_decode']
country:
description:
- Value of geo constraint (typically a two letter country code).
- The only valid key when I(type=geo).
type: str
ip_address:
description:
- An IP Address or CIDR to match.
- The only valid key when I(type=ip).
type: str
regex_pattern:
description:
- A dict describing the regular expressions used to perform the match.
- Only valid when I(type=regex).
type: dict
suboptions:
name:
description: A name to describe the set of patterns.
type: str
regex_strings:
description: A list of regular expressions to match.
type: list
elements: str
comparison:
description:
- What type of comparison to perform.
- Only valid key when I(type=size).
type: str
choices: ['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']
size:
description:
- The size of the field (in bytes).
- Only valid key when I(type=size).
type: int
target_string:
description:
- The string to search for.
- May be up to 50 bytes.
- Valid when I(type=byte).
type: str
purge_filters:
description:
- Whether to remove existing filters from a condition if not passed in I(filters).
default: false
type: bool
waf_regional:
description: Whether to use waf-regional module.
default: false
required: no
type: bool
version_added: 2.9
state:
description: Whether the condition should be C(present) or C(absent).
choices:
- present
- absent
default: present
type: str
'''
EXAMPLES = '''
- name: create WAF byte condition
aws_waf_condition:
name: my_byte_condition
filters:
- field_to_match: header
position: STARTS_WITH
target_string: Hello
header: Content-type
type: byte
- name: create WAF geo condition
aws_waf_condition:
name: my_geo_condition
filters:
- country: US
- country: AU
- country: AT
type: geo
- name: create IP address condition
aws_waf_condition:
name: "{{ resource_prefix }}_ip_condition"
filters:
- ip_address: "10.0.0.0/8"
- ip_address: "192.168.0.0/24"
type: ip
- name: create WAF regex condition
aws_waf_condition:
name: my_regex_condition
filters:
- field_to_match: query_string
regex_pattern:
name: greetings
regex_strings:
- '[hH]ello'
- '^Hi there'
- '.*Good Day to You'
type: regex
- name: create WAF size condition
aws_waf_condition:
name: my_size_condition
filters:
- field_to_match: query_string
size: 300
comparison: GT
type: size
- name: create WAF sql injection condition
aws_waf_condition:
name: my_sql_condition
filters:
- field_to_match: query_string
transformation: url_decode
type: sql
- name: create WAF xss condition
aws_waf_condition:
name: my_xss_condition
filters:
- field_to_match: query_string
transformation: url_decode
type: xss
'''
RETURN = '''
condition:
description: Condition returned by operation.
returned: always
type: complex
contains:
condition_id:
description: Type-agnostic ID for the condition.
returned: when state is present
type: str
sample: dd74b1ff-8c06-4a4f-897a-6b23605de413
byte_match_set_id:
description: ID for byte match set.
returned: always
type: str
sample: c4882c96-837b-44a2-a762-4ea87dbf812b
byte_match_tuples:
description: List of byte match tuples.
returned: always
type: complex
contains:
field_to_match:
description: Field to match.
returned: always
type: complex
contains:
data:
description: Which specific header (if type is header).
type: str
sample: content-type
type:
description: Type of field
type: str
sample: HEADER
positional_constraint:
description: Position in the field to match.
type: str
sample: STARTS_WITH
target_string:
description: String to look for.
type: str
sample: Hello
text_transformation:
description: Transformation to apply to the field before matching.
type: str
sample: NONE
geo_match_constraints:
description: List of geographical constraints.
returned: when type is geo and state is present
type: complex
contains:
type:
description: Type of geo constraint.
type: str
sample: Country
value:
description: Value of geo constraint (typically a country code).
type: str
sample: AT
geo_match_set_id:
description: ID of the geo match set.
returned: when type is geo and state is present
type: str
sample: dd74b1ff-8c06-4a4f-897a-6b23605de413
ip_set_descriptors:
description: list of IP address filters
returned: when type is ip and state is present
type: complex
contains:
type:
description: Type of IP address (IPV4 or IPV6).
returned: always
type: str
sample: IPV4
value:
description: IP address.
returned: always
type: str
sample: 10.0.0.0/8
ip_set_id:
description: ID of condition.
returned: when type is ip and state is present
type: str
sample: 78ad334a-3535-4036-85e6-8e11e745217b
name:
description: Name of condition.
returned: when state is present
type: str
sample: my_waf_condition
regex_match_set_id:
description: ID of the regex match set.
returned: when type is regex and state is present
type: str
sample: 5ea3f6a8-3cd3-488b-b637-17b79ce7089c
regex_match_tuples:
description: List of regex matches.
returned: when type is regex and state is present
type: complex
contains:
field_to_match:
description: Field on which the regex match is applied.
type: complex
contains:
type:
description: The field name.
returned: when type is regex and state is present
type: str
sample: QUERY_STRING
regex_pattern_set_id:
description: ID of the regex pattern.
type: str
sample: 6fdf7f2d-9091-445c-aef2-98f3c051ac9e
text_transformation:
description: transformation applied to the text before matching
type: str
sample: NONE
size_constraint_set_id:
description: ID of the size constraint set.
returned: when type is size and state is present
type: str
sample: de84b4b3-578b-447e-a9a0-0db35c995656
size_constraints:
description: List of size constraints to apply.
returned: when type is size and state is present
type: complex
contains:
comparison_operator:
description: Comparison operator to apply.
type: str
sample: GT
field_to_match:
description: Field on which the size constraint is applied.
type: complex
contains:
type:
description: Field name.
type: str
sample: QUERY_STRING
size:
description: Size to compare against the field.
type: int
sample: 300
text_transformation:
description: Transformation applied to the text before matching.
type: str
sample: NONE
sql_injection_match_set_id:
description: ID of the SQL injection match set.
returned: when type is sql and state is present
type: str
sample: de84b4b3-578b-447e-a9a0-0db35c995656
sql_injection_match_tuples:
description: List of SQL injection match sets.
returned: when type is sql and state is present
type: complex
contains:
field_to_match:
description: Field on which the SQL injection match is applied.
type: complex
contains:
type:
description: Field name.
type: str
sample: QUERY_STRING
text_transformation:
description: Transformation applied to the text before matching.
type: str
sample: URL_DECODE
xss_match_set_id:
description: ID of the XSS match set.
returned: when type is xss and state is present
type: str
sample: de84b4b3-578b-447e-a9a0-0db35c995656
xss_match_tuples:
description: List of XSS match sets.
returned: when type is xss and state is present
type: complex
contains:
field_to_match:
description: Field on which the XSS match is applied.
type: complex
contains:
type:
description: Field name
type: str
sample: QUERY_STRING
text_transformation:
description: transformation applied to the text before matching.
type: str
sample: URL_DECODE
'''
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_policies
from ansible.module_utils.aws.waf import run_func_with_change_token_backoff, MATCH_LOOKUP
from ansible.module_utils.aws.waf import get_rule_with_backoff, list_rules_with_backoff, list_regional_rules_with_backoff
class Condition(object):
    """Manage a single AWS WAF condition (match set) of one concrete type.

    The WAF API exposes one family of calls per condition type
    (byte/geo/ip/regex/size/sql/xss). MATCH_LOOKUP maps the module's
    'type' parameter to the method-name suffix and payload key names of
    that family, and this class derives the generic call/key names from it.
    """
    def __init__(self, client, module):
        # client is a boto3 'waf' or 'waf-regional' client.
        self.client = client
        self.module = module
        self.type = module.params['type']
        # Derive the type-specific API method suffix and payload key names.
        self.method_suffix = MATCH_LOOKUP[self.type]['method']
        self.conditionset = MATCH_LOOKUP[self.type]['conditionset']
        self.conditionsets = MATCH_LOOKUP[self.type]['conditionset'] + 's'
        self.conditionsetid = MATCH_LOOKUP[self.type]['conditionset'] + 'Id'
        self.conditiontuple = MATCH_LOOKUP[self.type]['conditiontuple']
        self.conditiontuples = MATCH_LOOKUP[self.type]['conditiontuple'] + 's'
        self.conditiontype = MATCH_LOOKUP[self.type]['type']
    def format_for_update(self, condition_set_id):
        """Build kwargs for an update_* call that INSERTs every desired filter."""
        # Prep kwargs
        kwargs = dict()
        kwargs['Updates'] = list()
        for filtr in self.module.params.get('filters'):
            # Only for ip_set
            if self.type == 'ip':
                # there might be a better way of detecting an IPv6 address
                if ':' in filtr.get('ip_address'):
                    ip_type = 'IPV6'
                else:
                    ip_type = 'IPV4'
                condition_insert = {'Type': ip_type, 'Value': filtr.get('ip_address')}
            # Specific for geo_match_set
            if self.type == 'geo':
                condition_insert = dict(Type='Country', Value=filtr.get('country'))
            # Common For everything but ip_set and geo_match_set
            if self.type not in ('ip', 'geo'):
                condition_insert = dict(FieldToMatch=dict(Type=filtr.get('field_to_match').upper()),
                                        TextTransformation=filtr.get('transformation', 'none').upper())
                if filtr.get('field_to_match').upper() == "HEADER":
                    if filtr.get('header'):
                        condition_insert['FieldToMatch']['Data'] = filtr.get('header').lower()
                    else:
                        self.module.fail_json(msg=str("DATA required when HEADER requested"))
            # Specific for byte_match_set
            if self.type == 'byte':
                condition_insert['TargetString'] = filtr.get('target_string')
                condition_insert['PositionalConstraint'] = filtr.get('position')
            # Specific for size_constraint_set
            if self.type == 'size':
                condition_insert['ComparisonOperator'] = filtr.get('comparison')
                condition_insert['Size'] = filtr.get('size')
            # Specific for regex_match_set
            if self.type == 'regex':
                # Creating/updating the referenced regex pattern set is a side
                # effect of formatting a regex filter.
                condition_insert['RegexPatternSetId'] = self.ensure_regex_pattern_present(filtr.get('regex_pattern'))['RegexPatternSetId']
            kwargs['Updates'].append({'Action': 'INSERT', self.conditiontuple: condition_insert})
        kwargs[self.conditionsetid] = condition_set_id
        return kwargs
    def format_for_deletion(self, condition):
        """Build kwargs for an update_* call that DELETEs every existing filter."""
        return {'Updates': [{'Action': 'DELETE', self.conditiontuple: current_condition_tuple}
                            for current_condition_tuple in condition[self.conditiontuples]],
                self.conditionsetid: condition[self.conditionsetid]}
    @AWSRetry.exponential_backoff()
    def list_regex_patterns_with_backoff(self, **params):
        """One page of list_regex_pattern_sets, retried with exponential backoff."""
        return self.client.list_regex_pattern_sets(**params)
    @AWSRetry.exponential_backoff()
    def get_regex_pattern_set_with_backoff(self, regex_pattern_set_id):
        """get_regex_pattern_set by ID, retried with exponential backoff."""
        return self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)
    def list_regex_patterns(self):
        """Return all regex pattern sets, following NextMarker pagination by hand."""
        # at time of writing(2017-11-20) no regex pattern paginator exists
        regex_patterns = []
        params = {}
        while True:
            try:
                response = self.list_regex_patterns_with_backoff(**params)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self.module.fail_json_aws(e, msg='Could not list regex patterns')
            regex_patterns.extend(response['RegexPatternSets'])
            if 'NextMarker' in response:
                params['NextMarker'] = response['NextMarker']
            else:
                break
        return regex_patterns
    def get_regex_pattern_by_name(self, name):
        """Return the full RegexPatternSet whose Name matches, or None."""
        existing_regex_patterns = self.list_regex_patterns()
        regex_lookup = dict((item['Name'], item['RegexPatternSetId']) for item in existing_regex_patterns)
        if name in regex_lookup:
            return self.get_regex_pattern_set_with_backoff(regex_lookup[name])['RegexPatternSet']
        else:
            return None
    def ensure_regex_pattern_present(self, regex_pattern):
        """Create or reconcile a regex pattern set to hold exactly
        regex_pattern['regex_strings']; return the resulting RegexPatternSet."""
        name = regex_pattern['name']
        pattern_set = self.get_regex_pattern_by_name(name)
        if not pattern_set:
            pattern_set = run_func_with_change_token_backoff(self.client, self.module, {'Name': name},
                                                             self.client.create_regex_pattern_set)['RegexPatternSet']
        missing = set(regex_pattern['regex_strings']) - set(pattern_set['RegexPatternStrings'])
        extra = set(pattern_set['RegexPatternStrings']) - set(regex_pattern['regex_strings'])
        if not missing and not extra:
            return pattern_set
        updates = [{'Action': 'INSERT', 'RegexPatternString': pattern} for pattern in missing]
        updates.extend([{'Action': 'DELETE', 'RegexPatternString': pattern} for pattern in extra])
        run_func_with_change_token_backoff(self.client, self.module,
                                           {'RegexPatternSetId': pattern_set['RegexPatternSetId'], 'Updates': updates},
                                           self.client.update_regex_pattern_set, wait=True)
        return self.get_regex_pattern_set_with_backoff(pattern_set['RegexPatternSetId'])['RegexPatternSet']
    def delete_unused_regex_pattern(self, regex_pattern_set_id):
        """Empty and delete a regex pattern set; silently ignore one already gone."""
        try:
            regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)['RegexPatternSet']
            updates = list()
            for regex_pattern_string in regex_pattern_set['RegexPatternStrings']:
                updates.append({'Action': 'DELETE', 'RegexPatternString': regex_pattern_string})
            run_func_with_change_token_backoff(self.client, self.module,
                                               {'RegexPatternSetId': regex_pattern_set_id, 'Updates': updates},
                                               self.client.update_regex_pattern_set)
            run_func_with_change_token_backoff(self.client, self.module,
                                               {'RegexPatternSetId': regex_pattern_set_id},
                                               self.client.delete_regex_pattern_set, wait=True)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            # NOTE(review): BotoCoreError has no .response attribute, so this
            # access assumes a ClientError -- confirm before relying on it.
            if e.response['Error']['Code'] == 'WAFNonexistentItemException':
                return
            self.module.fail_json_aws(e, msg='Could not delete regex pattern')
    def get_condition_by_name(self, name):
        """Return the condition set ID for the condition named `name`, or None."""
        all_conditions = [d for d in self.list_conditions() if d['Name'] == name]
        if all_conditions:
            return all_conditions[0][self.conditionsetid]
    @AWSRetry.exponential_backoff()
    def get_condition_by_id_with_backoff(self, condition_set_id):
        """Fetch one condition set by ID via the type-specific get_* call."""
        params = dict()
        params[self.conditionsetid] = condition_set_id
        func = getattr(self.client, 'get_' + self.method_suffix)
        return func(**params)[self.conditionset]
    def get_condition_by_id(self, condition_set_id):
        """get_condition_by_id_with_backoff wrapped in module failure handling."""
        try:
            return self.get_condition_by_id_with_backoff(condition_set_id)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg='Could not get condition')
    def list_conditions(self):
        """List all condition sets of this type, paginating when supported."""
        method = 'list_' + self.method_suffix + 's'
        try:
            paginator = self.client.get_paginator(method)
            func = paginator.paginate().build_full_result
        except botocore.exceptions.OperationNotPageableError:
            # list_geo_match_sets and list_regex_match_sets do not have a paginator
            func = getattr(self.client, method)
        try:
            return func()[self.conditionsets]
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg='Could not list %s conditions' % self.type)
    def tidy_up_regex_patterns(self, regex_match_set):
        """Delete regex pattern sets no longer referenced by any regex match set."""
        all_regex_match_sets = self.list_conditions()
        all_match_set_patterns = list()
        for rms in all_regex_match_sets:
            all_match_set_patterns.extend(conditiontuple['RegexPatternSetId']
                                          for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples])
        for filtr in regex_match_set[self.conditiontuples]:
            if filtr['RegexPatternSetId'] not in all_match_set_patterns:
                self.delete_unused_regex_pattern(filtr['RegexPatternSetId'])
    def find_condition_in_rules(self, condition_set_id):
        """Return the names of WAF rules whose predicates reference this condition."""
        rules_in_use = []
        try:
            # Dispatch on the boto3 client class to pick the classic vs
            # regional rule-listing helper.
            if self.client.__class__.__name__ == 'WAF':
                all_rules = list_rules_with_backoff(self.client)
            elif self.client.__class__.__name__ == 'WAFRegional':
                all_rules = list_regional_rules_with_backoff(self.client)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg='Could not list rules')
        for rule in all_rules:
            try:
                rule_details = get_rule_with_backoff(self.client, rule['RuleId'])
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self.module.fail_json_aws(e, msg='Could not get rule details')
            if condition_set_id in [predicate['DataId'] for predicate in rule_details['Predicates']]:
                rules_in_use.append(rule_details['Name'])
        return rules_in_use
    def find_and_delete_condition(self, condition_set_id):
        """Delete a condition and its filters; fail if any rule still uses it.

        Returns (True, {}) on success.
        """
        current_condition = self.get_condition_by_id(condition_set_id)
        in_use_rules = self.find_condition_in_rules(condition_set_id)
        if in_use_rules:
            rulenames = ', '.join(in_use_rules)
            self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition['Name'], rulenames))
        if current_condition[self.conditiontuples]:
            # Filters are deleted using update with the DELETE action
            func = getattr(self.client, 'update_' + self.method_suffix)
            params = self.format_for_deletion(current_condition)
            try:
                # We do not need to wait for the conditiontuple delete because we wait later for the delete_* call
                run_func_with_change_token_backoff(self.client, self.module, params, func)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self.module.fail_json_aws(e, msg='Could not delete filters from condition')
        func = getattr(self.client, 'delete_' + self.method_suffix)
        params = dict()
        params[self.conditionsetid] = condition_set_id
        try:
            run_func_with_change_token_backoff(self.client, self.module, params, func, wait=True)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg='Could not delete condition')
        # tidy up regex patterns
        if self.type == 'regex':
            self.tidy_up_regex_patterns(current_condition)
        return True, {}
    def find_missing(self, update, current_condition):
        """Return the desired INSERT updates not already present on the condition."""
        missing = []
        for desired in update['Updates']:
            found = False
            desired_condition = desired[self.conditiontuple]
            current_conditions = current_condition[self.conditiontuples]
            for condition in current_conditions:
                # A falsy compare_policies result is treated as "tuples match".
                if not compare_policies(condition, desired_condition):
                    found = True
            if not found:
                missing.append(desired)
        return missing
    def find_and_update_condition(self, condition_set_id):
        """Reconcile the condition's filters with the requested ones.

        Returns (changed, latest_condition_dict).
        """
        current_condition = self.get_condition_by_id(condition_set_id)
        update = self.format_for_update(condition_set_id)
        missing = self.find_missing(update, current_condition)
        if self.module.params.get('purge_filters'):
            extra = [{'Action': 'DELETE', self.conditiontuple: current_tuple}
                     for current_tuple in current_condition[self.conditiontuples]
                     if current_tuple not in [desired[self.conditiontuple] for desired in update['Updates']]]
        else:
            extra = []
        changed = bool(missing or extra)
        if changed:
            update['Updates'] = missing + extra
            func = getattr(self.client, 'update_' + self.method_suffix)
            try:
                result = run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self.module.fail_json_aws(e, msg='Could not update condition')
        return changed, self.get_condition_by_id(condition_set_id)
    def ensure_condition_present(self):
        """Create the condition if needed, then reconcile its filters.

        Returns (changed, condition_dict).
        """
        name = self.module.params['name']
        condition_set_id = self.get_condition_by_name(name)
        if condition_set_id:
            return self.find_and_update_condition(condition_set_id)
        else:
            params = dict()
            params['Name'] = name
            func = getattr(self.client, 'create_' + self.method_suffix)
            try:
                condition = run_func_with_change_token_backoff(self.client, self.module, params, func)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self.module.fail_json_aws(e, msg='Could not create condition')
            return self.find_and_update_condition(condition[self.conditionset][self.conditionsetid])
    def ensure_condition_absent(self):
        """Delete the condition if it exists. Returns (changed, {})."""
        condition_set_id = self.get_condition_by_name(self.module.params['name'])
        if condition_set_id:
            return self.find_and_delete_condition(condition_set_id)
        return False, {}
def main():
    '''Module entry point: create, update or delete a WAF condition.'''
    # Documents the accepted suboptions of each 'filters' entry; kept for
    # reference as in the original (the 'filters' option below is declared
    # as a plain list, so this spec is not enforced).
    filters_subspec = dict(
        country=dict(),
        field_to_match=dict(choices=['uri', 'query_string', 'header', 'method', 'body']),
        header=dict(),
        transformation=dict(choices=['none', 'compress_white_space',
                                     'html_entity_decode', 'lowercase',
                                     'cmd_line', 'url_decode']),
        position=dict(choices=['exactly', 'starts_with', 'ends_with',
                               'contains', 'contains_word']),
        comparison=dict(choices=['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']),
        target_string=dict(),  # Bytes
        size=dict(type='int'),
        ip_address=dict(),
        regex_pattern=dict(),
    )
    argument_spec = dict(
        name=dict(required=True),
        type=dict(required=True, choices=['byte', 'geo', 'ip', 'regex', 'size', 'sql', 'xss']),
        filters=dict(type='list'),
        purge_filters=dict(type='bool', default=False),
        waf_regional=dict(type='bool', default=False),
        state=dict(default='present', choices=['present', 'absent']),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[['state', 'present', ['filters']]])

    resource = 'waf-regional' if module.params['waf_regional'] else 'waf'
    condition = Condition(module.client(resource), module)

    if module.params.get('state') == 'present':
        (changed, results) = condition.ensure_condition_present()
        # return a condition agnostic ID for use by aws_waf_rule
        results['ConditionId'] = results[condition.conditionsetid]
    else:
        (changed, results) = condition.ensure_condition_absent()

    module.exit_json(changed=changed, condition=camel_dict_to_snake_dict(results))


if __name__ == '__main__':
    main()

@ -1,149 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_waf_info
short_description: Retrieve information for WAF ACLs, Rules, Conditions and Filters.
description:
  - Retrieve information for WAF ACLs, Rules, Conditions and Filters.
- This module was called C(aws_waf_facts) before Ansible 2.9. The usage did not change.
version_added: "2.4"
requirements: [ boto3 ]
options:
name:
description:
- The name of a Web Application Firewall.
type: str
waf_regional:
description: Whether to use the waf-regional module.
default: false
required: no
type: bool
version_added: "2.9"
author:
- Mike Mochan (@mmochan)
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: obtain all WAF information
aws_waf_info:
- name: obtain all information for a single WAF
aws_waf_info:
name: test_waf
- name: obtain all information for a single WAF Regional
aws_waf_info:
name: test_waf
waf_regional: true
'''
RETURN = '''
wafs:
description: The WAFs that match the passed arguments.
returned: success
type: complex
contains:
name:
description: A friendly name or description of the WebACL.
returned: always
type: str
sample: test_waf
default_action:
description: The action to perform if none of the Rules contained in the WebACL match.
returned: always
type: int
sample: BLOCK
metric_name:
description: A friendly name or description for the metrics for this WebACL.
returned: always
type: str
sample: test_waf_metric
rules:
      description: An array that contains the action for each Rule in a WebACL, and the priority of the Rule.
returned: always
type: complex
contains:
action:
description: The action to perform if the Rule matches.
returned: always
type: str
sample: BLOCK
metric_name:
description: A friendly name or description for the metrics for this Rule.
returned: always
type: str
sample: ipblockrule
name:
description: A friendly name or description of the Rule.
returned: always
type: str
sample: ip_block_rule
predicates:
description: The Predicates list contains a Predicate for each
ByteMatchSet, IPSet, SizeConstraintSet, SqlInjectionMatchSet or XssMatchSet
object in a Rule.
returned: always
type: list
sample:
[
{
"byte_match_set_id": "47b822b5-abcd-1234-faaf-1234567890",
"byte_match_tuples": [
{
"field_to_match": {
"type": "QUERY_STRING"
},
"positional_constraint": "STARTS_WITH",
"target_string": "bobbins",
"text_transformation": "NONE"
}
],
"name": "bobbins",
"negated": false,
"type": "ByteMatch"
}
]
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.waf import list_web_acls, get_web_acl
def main():
    """Entry point: gather WAF (or WAF Regional) web ACL details and exit."""
    argument_spec = dict(
        name=dict(required=False),
        waf_regional=dict(type='bool', default=False)
    )
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    # The module was renamed from *_facts to *_info; keep the old name working
    # but warn users it is deprecated.
    if module._name == 'aws_waf_facts':
        module.deprecate("The 'aws_waf_facts' module has been renamed to 'aws_waf_info'", version='2.13')

    # Choose the classic or regional WAF endpoint.
    resource = 'waf-regional' if module.params['waf_regional'] else 'waf'
    client = module.client(resource)

    web_acls = list_web_acls(client, module)
    name = module.params['name']
    if name:
        web_acls = [acl for acl in web_acls if acl['Name'] == name]
        if not web_acls:
            module.fail_json(msg="WAF named %s not found" % name)

    module.exit_json(wafs=[get_web_acl(client, module, acl['WebACLId'])
                           for acl in web_acls])


if __name__ == '__main__':
    main()

@ -1,355 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Will Thames
# Copyright (c) 2015 Mike Mochan
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_waf_rule
short_description: Create and delete WAF Rules
description:
- Read the AWS documentation for WAF
U(https://aws.amazon.com/documentation/waf/).
version_added: "2.5"
author:
- Mike Mochan (@mmochan)
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
options:
name:
description: Name of the Web Application Firewall rule.
required: yes
type: str
metric_name:
description:
- A friendly name or description for the metrics for the rule.
- The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace.
- You can't change I(metric_name) after you create the rule.
- Defaults to the same as I(name) with disallowed characters removed.
type: str
state:
description: Whether the rule should be present or absent.
choices:
- present
- absent
default: present
type: str
conditions:
description: >
List of conditions used in the rule. M(aws_waf_condition) can be used to
create new conditions.
type: list
elements: dict
suboptions:
type:
required: true
type: str
choices: ['byte','geo','ip','size','sql','xss']
description: The type of rule to match.
negated:
required: true
type: bool
description: Whether the condition should be negated.
condition:
required: true
type: str
description: The name of the condition. The condition must already exist.
purge_conditions:
description:
    - Whether or not to remove conditions that are not passed when updating I(conditions).
default: false
type: bool
waf_regional:
    description: Whether to use the waf-regional module.
default: false
required: false
type: bool
version_added: "2.9"
'''
EXAMPLES = '''
- name: create WAF rule
aws_waf_rule:
name: my_waf_rule
conditions:
- name: my_regex_condition
type: regex
negated: no
- name: my_geo_condition
type: geo
negated: no
- name: my_byte_condition
type: byte
negated: yes
- name: remove WAF rule
aws_waf_rule:
name: "my_waf_rule"
state: absent
'''
RETURN = '''
rule:
description: WAF rule contents
returned: always
type: complex
contains:
metric_name:
description: Metric name for the rule.
returned: always
type: str
sample: ansibletest1234rule
name:
description: Friendly name for the rule.
returned: always
type: str
sample: ansible-test-1234_rule
predicates:
description: List of conditions used in the rule.
returned: always
type: complex
contains:
data_id:
description: ID of the condition.
returned: always
type: str
sample: 8251acdb-526c-42a8-92bc-d3d13e584166
negated:
description: Whether the sense of the condition is negated.
returned: always
type: bool
sample: false
type:
description: type of the condition.
returned: always
type: str
sample: ByteMatch
rule_id:
description: ID of the WAF rule.
returned: always
type: str
sample: 15de0cbc-9204-4e1f-90e6-69b2f415c261
'''
import re
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.aws.waf import run_func_with_change_token_backoff, list_rules_with_backoff, list_regional_rules_with_backoff, MATCH_LOOKUP
from ansible.module_utils.aws.waf import get_web_acl_with_backoff, list_web_acls_with_backoff, list_regional_web_acls_with_backoff
def get_rule_by_name(client, module, name):
    """Return the RuleId of the WAF rule named *name*, or None when no rule matches."""
    matching = [rule['RuleId'] for rule in list_rules(client, module)
                if rule['Name'] == name]
    if matching:
        return matching[0]
def get_rule(client, module, rule_id):
    """Fetch the full definition of the rule *rule_id*, failing the module on AWS errors."""
    try:
        response = client.get_rule(RuleId=rule_id)
        return response['Rule']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not get WAF rule')
def list_rules(client, module):
    """List rules for the client's WAF flavour (classic or regional).

    Fails the module on AWS API errors. Previously an unexpected client class
    fell through every branch and silently returned None, producing a
    confusing TypeError in callers; it now fails with an explicit message.
    """
    if client.__class__.__name__ == 'WAF':
        try:
            return list_rules_with_backoff(client)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not list WAF rules')
    elif client.__class__.__name__ == 'WAFRegional':
        try:
            return list_regional_rules_with_backoff(client)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not list WAF Regional rules')
    else:
        module.fail_json(msg='Unsupported client type %s for listing WAF rules' % client.__class__.__name__)
def list_regional_rules(client, module):
    """List WAF Regional rules, failing the module on AWS API errors."""
    # NOTE(review): duplicates the WAFRegional branch of list_rules(); kept
    # in case external code imports it directly.
    try:
        return list_regional_rules_with_backoff(client)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not list WAF rules')
def find_and_update_rule(client, module, rule_id):
    """Reconcile the predicates attached to an existing rule with the
    conditions requested in module params, inserting missing predicates and
    (when purge_conditions is set) deleting unrequested ones.

    Returns (changed, rule) where rule is re-fetched after any update.
    """
    rule = get_rule(client, module, rule_id)
    rule_id = rule['RuleId']
    # Per-condition-type indexes of what is attached vs. what is requested.
    existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
    desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
    all_conditions = dict()
    for condition_type in MATCH_LOOKUP:
        method = 'list_' + MATCH_LOOKUP[condition_type]['method'] + 's'
        all_conditions[condition_type] = dict()
        try:
            paginator = client.get_paginator(method)
            func = paginator.paginate().build_full_result
        except (KeyError, botocore.exceptions.OperationNotPageableError):
            # list_geo_match_sets and list_regex_match_sets do not have a paginator
            # and throw different exceptions
            func = getattr(client, method)
        try:
            pred_results = func()[MATCH_LOOKUP[condition_type]['conditionset'] + 's']
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not list %s conditions' % condition_type)
        for pred in pred_results:
            pred['DataId'] = pred[MATCH_LOOKUP[condition_type]['conditionset'] + 'Id']
            # Index each condition by both its name and its DataId so the
            # lookups below can work from either key.
            all_conditions[condition_type][pred['Name']] = camel_dict_to_snake_dict(pred)
            all_conditions[condition_type][pred['DataId']] = camel_dict_to_snake_dict(pred)
    for condition in module.params['conditions']:
        desired_conditions[condition['type']][condition['name']] = condition
    # Map the AWS predicate Type (e.g. 'ByteMatch') back to the module's
    # condition-type key.
    reverse_condition_types = dict((v['type'], k) for (k, v) in MATCH_LOOKUP.items())
    for condition in rule['Predicates']:
        existing_conditions[reverse_condition_types[condition['Type']]][condition['DataId']] = camel_dict_to_snake_dict(condition)
    insertions = list()
    deletions = list()
    for condition_type in desired_conditions:
        for (condition_name, condition) in desired_conditions[condition_type].items():
            if condition_name not in all_conditions[condition_type]:
                module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type))
            condition['data_id'] = all_conditions[condition_type][condition_name]['data_id']
            if condition['data_id'] not in existing_conditions[condition_type]:
                insertions.append(format_for_insertion(condition))
    if module.params['purge_conditions']:
        # Delete any attached predicate whose condition name was not requested.
        for condition_type in existing_conditions:
            deletions.extend([format_for_deletion(condition) for condition in existing_conditions[condition_type].values()
                              if not all_conditions[condition_type][condition['data_id']]['name'] in desired_conditions[condition_type]])
    changed = bool(insertions or deletions)
    update = {
        'RuleId': rule_id,
        'Updates': insertions + deletions
    }
    if changed:
        try:
            run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not update rule conditions')
    return changed, get_rule(client, module, rule_id)
def format_for_insertion(condition):
    """Build an INSERT update entry for client.update_rule from a module condition dict."""
    predicate = dict(
        Negated=condition['negated'],
        Type=MATCH_LOOKUP[condition['type']]['type'],
        DataId=condition['data_id'],
    )
    return dict(Action='INSERT', Predicate=predicate)
def format_for_deletion(condition):
    """Build a DELETE update entry for client.update_rule from a snake-cased predicate dict."""
    predicate = {
        'Negated': condition['negated'],
        'Type': condition['type'],
        'DataId': condition['data_id'],
    }
    return {'Action': 'DELETE', 'Predicate': predicate}
def remove_rule_conditions(client, module, rule_id):
    """Detach every predicate currently on the rule (AWS requires this before deletion)."""
    predicates = get_rule(client, module, rule_id)['Predicates']
    updates = [format_for_deletion(camel_dict_to_snake_dict(predicate)) for predicate in predicates]
    try:
        run_func_with_change_token_backoff(client, module, {'RuleId': rule_id, 'Updates': updates}, client.update_rule)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not remove rule conditions')
def ensure_rule_present(client, module):
    """Create the rule if it does not exist, then reconcile its conditions.

    Returns (changed, rule).
    """
    name = module.params['name']
    rule_id = get_rule_by_name(client, module, name)
    if rule_id:
        return find_and_update_rule(client, module, rule_id)
    # Rule does not exist yet: create it. The metric name defaults to the
    # rule name with any characters AWS disallows stripped out.
    metric_name = module.params['metric_name']
    if not metric_name:
        metric_name = re.sub(r'[^a-zA-Z0-9]', '', module.params['name'])
    params = {'Name': name, 'MetricName': metric_name}
    try:
        new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)['Rule']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not create rule')
    return find_and_update_rule(client, module, new_rule['RuleId'])
def find_rule_in_web_acls(client, module, rule_id):
    """Return the names of every Web ACL that currently references *rule_id*."""
    try:
        if client.__class__.__name__ == 'WAF':
            all_web_acls = list_web_acls_with_backoff(client)
        elif client.__class__.__name__ == 'WAFRegional':
            all_web_acls = list_regional_web_acls_with_backoff(client)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not list Web ACLs')
    in_use = []
    for web_acl in all_web_acls:
        try:
            details = get_web_acl_with_backoff(client, web_acl['WebACLId'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not get Web ACL details')
        attached_rule_ids = [rule['RuleId'] for rule in details['Rules']]
        if rule_id in attached_rule_ids:
            in_use.append(details['Name'])
    return in_use
def ensure_rule_absent(client, module):
    """Delete the named rule if it exists and no Web ACL still references it.

    Returns (changed, result).
    """
    rule_id = get_rule_by_name(client, module, module.params['name'])
    blocking_acls = find_rule_in_web_acls(client, module, rule_id)
    if blocking_acls:
        module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" %
                         (module.params['name'], ', '.join(blocking_acls)))
    if rule_id:
        # Predicates must be detached before AWS will accept the deletion.
        remove_rule_conditions(client, module, rule_id)
        try:
            return True, run_func_with_change_token_backoff(client, module, {'RuleId': rule_id}, client.delete_rule, wait=True)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not delete rule')
    return False, {}
def main():
    """Entry point for the aws_waf_rule module."""
    argument_spec = dict(
        name=dict(required=True),
        metric_name=dict(),
        state=dict(default='present', choices=['present', 'absent']),
        conditions=dict(type='list'),
        purge_conditions=dict(type='bool', default=False),
        waf_regional=dict(type='bool', default=False),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec)

    # Choose the classic or regional WAF endpoint.
    resource = 'waf-regional' if module.params['waf_regional'] else 'waf'
    client = module.client(resource)

    if module.params.get('state') == 'present':
        changed, results = ensure_rule_present(client, module)
    else:
        changed, results = ensure_rule_absent(client, module)

    module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results))


if __name__ == '__main__':
    main()

@ -1,359 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_waf_web_acl
short_description: Create and delete WAF Web ACLs.
description:
- Read the AWS documentation for WAF
U(https://aws.amazon.com/documentation/waf/).
version_added: "2.5"
author:
- Mike Mochan (@mmochan)
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
options:
name:
description: Name of the Web Application Firewall ACL to manage.
required: yes
type: str
default_action:
description: The action that you want AWS WAF to take when a request doesn't
match the criteria specified in any of the Rule objects that are associated with the WebACL.
choices:
- block
- allow
- count
type: str
state:
description: Whether the Web ACL should be present or absent.
choices:
- present
- absent
default: present
type: str
metric_name:
description:
- A friendly name or description for the metrics for this WebACL.
- The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace.
- You can't change I(metric_name) after you create the WebACL.
- Metric name will default to I(name) with disallowed characters stripped out.
type: str
rules:
description:
- A list of rules that the Web ACL will enforce.
type: list
elements: dict
suboptions:
name:
description: Name of the rule.
type: str
required: true
action:
description: The action to perform.
type: str
required: true
priority:
description: The priority of the action. Priorities must be unique. Lower numbered priorities are evaluated first.
type: int
required: true
type:
description: The type of rule.
choices:
- rate_based
- regular
type: str
purge_rules:
description:
- Whether to remove rules that aren't passed with I(rules).
default: False
type: bool
waf_regional:
    description: Whether to use the waf-regional module.
default: false
required: no
type: bool
version_added: "2.9"
'''
EXAMPLES = '''
- name: create web ACL
aws_waf_web_acl:
name: my_web_acl
rules:
- name: my_rule
priority: 1
action: block
default_action: block
purge_rules: yes
state: present
- name: delete the web acl
aws_waf_web_acl:
name: my_web_acl
state: absent
'''
RETURN = '''
web_acl:
description: contents of the Web ACL.
returned: always
type: complex
contains:
default_action:
description: Default action taken by the Web ACL if no rules match.
returned: always
type: dict
sample:
type: BLOCK
metric_name:
description: Metric name used as an identifier.
returned: always
type: str
sample: mywebacl
name:
description: Friendly name of the Web ACL.
returned: always
type: str
sample: my web acl
rules:
description: List of rules.
returned: always
type: complex
contains:
action:
description: Action taken by the WAF when the rule matches.
returned: always
type: complex
sample:
type: ALLOW
priority:
description: priority number of the rule (lower numbers are run first).
returned: always
type: int
sample: 2
rule_id:
description: Rule ID.
returned: always
type: str
sample: a6fc7ab5-287b-479f-8004-7fd0399daf75
type:
description: Type of rule (either REGULAR or RATE_BASED).
returned: always
type: str
sample: REGULAR
web_acl_id:
description: Unique identifier of Web ACL.
returned: always
type: str
sample: 10fff965-4b6b-46e2-9d78-24f6d2e2d21c
'''
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
import re
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.waiters import get_waiter
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.aws.waf import list_rules_with_backoff, list_web_acls_with_backoff, list_regional_web_acls_with_backoff, \
run_func_with_change_token_backoff, list_regional_rules_with_backoff
def get_web_acl_by_name(client, module, name):
    """Return the WebACLId of the Web ACL named *name*, or None when not found.

    Previously an empty list was returned on a miss, giving the function an
    inconsistent return type (str or list). Callers only truth-test the
    result, so returning None is backward-compatible and matches the
    convention of the analogous get_rule_by_name helper in aws_waf_rule.
    """
    matching = [acl['WebACLId'] for acl in list_web_acls(client, module) if acl['Name'] == name]
    if matching:
        return matching[0]
    return None
def create_rule_lookup(client, module):
    """Return a mapping of rule name -> rule summary for the client's WAF flavour."""
    client_kind = client.__class__.__name__
    if client_kind == 'WAF':
        try:
            rules = list_rules_with_backoff(client)
            return dict((rule['Name'], rule) for rule in rules)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not list rules')
    elif client_kind == 'WAFRegional':
        try:
            rules = list_regional_rules_with_backoff(client)
            return dict((rule['Name'], rule) for rule in rules)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not list regional rules')
def get_web_acl(client, module, web_acl_id):
    """Fetch full details of the Web ACL *web_acl_id*, failing the module on AWS errors."""
    try:
        response = client.get_web_acl(WebACLId=web_acl_id)
        return response['WebACL']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not get Web ACL with id %s' % web_acl_id)
def list_web_acls(client, module):
    """List Web ACLs for the client's WAF flavour (classic or regional).

    Fails the module on AWS API errors. Fixes a stray trailing comma in the
    parameter list, and replaces the silent fall-through (which returned
    None for an unexpected client class) with an explicit failure.
    """
    if client.__class__.__name__ == 'WAF':
        try:
            return list_web_acls_with_backoff(client)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not get Web ACLs')
    elif client.__class__.__name__ == 'WAFRegional':
        try:
            return list_regional_web_acls_with_backoff(client)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not get Web ACLs')
    else:
        module.fail_json(msg='Unsupported client type %s for listing Web ACLs' % client.__class__.__name__)
def find_and_update_web_acl(client, module, web_acl_id):
    """Reconcile the rules attached to an existing Web ACL with the rules
    requested in module params, inserting missing rules and (when purge_rules
    is set) deleting unrequested ones.

    Returns (changed, acl) where acl is re-fetched after any update.
    """
    acl = get_web_acl(client, module, web_acl_id)
    rule_lookup = create_rule_lookup(client, module)
    existing_rules = acl['Rules']
    # Translate the module's rule specs into the ActivatedRule shape AWS uses
    # so they can be compared directly against the attached rules.
    desired_rules = [{'RuleId': rule_lookup[rule['name']]['RuleId'],
                      'Priority': rule['priority'],
                      'Action': {'Type': rule['action'].upper()},
                      'Type': rule.get('type', 'regular').upper()}
                     for rule in module.params['rules']]
    missing = [rule for rule in desired_rules if rule not in existing_rules]
    extras = []
    if module.params['purge_rules']:
        extras = [rule for rule in existing_rules if rule not in desired_rules]
    insertions = [format_for_update(rule, 'INSERT') for rule in missing]
    deletions = [format_for_update(rule, 'DELETE') for rule in extras]
    changed = bool(insertions + deletions)
    # Purge rules before adding new ones in case a deletion shares the same
    # priority as an insertion.
    params = {
        'WebACLId': acl['WebACLId'],
        'DefaultAction': acl['DefaultAction']
    }
    change_tokens = []
    if deletions:
        try:
            params['Updates'] = deletions
            result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
            change_tokens.append(result['ChangeToken'])
            # NOTE(review): this token is waited on again in the loop below,
            # so this wait appears redundant (harmless, but doubles latency
            # of the delete phase) — confirm before removing either wait.
            get_waiter(
                client, 'change_token_in_sync',
            ).wait(
                ChangeToken=result['ChangeToken']
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not update Web ACL')
    if insertions:
        try:
            params['Updates'] = insertions
            result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
            change_tokens.append(result['ChangeToken'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not update Web ACL')
    if change_tokens:
        # Wait for every submitted change to propagate before re-reading the ACL.
        for token in change_tokens:
            get_waiter(
                client, 'change_token_in_sync',
            ).wait(
                ChangeToken=token
            )
    if changed:
        acl = get_web_acl(client, module, web_acl_id)
    return changed, acl
def format_for_update(rule, action):
    """Wrap an ActivatedRule-shaped dict into an update entry for client.update_web_acl.

    Note: the rule's 'Type' key (REGULAR/RATE_BASED) is intentionally not
    part of the emitted ActivatedRule.
    """
    activated_rule = {
        'Priority': rule['Priority'],
        'RuleId': rule['RuleId'],
        'Action': {'Type': rule['Action']['Type']},
    }
    return {'Action': action, 'ActivatedRule': activated_rule}
def remove_rules_from_web_acl(client, module, web_acl_id):
    """Detach every rule from the Web ACL (required before the ACL can be deleted)."""
    acl = get_web_acl(client, module, web_acl_id)
    updates = [format_for_update(rule, 'DELETE') for rule in acl['Rules']]
    params = {'WebACLId': acl['WebACLId'], 'DefaultAction': acl['DefaultAction'], 'Updates': updates}
    try:
        run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not remove rule')
def ensure_web_acl_present(client, module):
    """Create the Web ACL if it does not exist, then reconcile its rules.

    Returns (changed, result).
    """
    name = module.params['name']
    web_acl_id = get_web_acl_by_name(client, module, name)
    if web_acl_id:
        return find_and_update_web_acl(client, module, web_acl_id)
    # ACL does not exist yet: create it. The metric name defaults to the ACL
    # name with any characters AWS disallows stripped out.
    metric_name = module.params['metric_name']
    if not metric_name:
        metric_name = re.sub(r'[^A-Za-z0-9]', '', module.params['name'])
    default_action = module.params['default_action'].upper()
    try:
        params = {'Name': name, 'MetricName': metric_name, 'DefaultAction': {'Type': default_action}}
        new_web_acl = run_func_with_change_token_backoff(client, module, params, client.create_web_acl)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not create Web ACL')
    return find_and_update_web_acl(client, module, new_web_acl['WebACL']['WebACLId'])
def ensure_web_acl_absent(client, module):
    """Delete the named Web ACL if it exists. Returns (changed, result)."""
    web_acl_id = get_web_acl_by_name(client, module, module.params['name'])
    if web_acl_id:
        web_acl = get_web_acl(client, module, web_acl_id)
        # AWS refuses to delete an ACL that still has rules attached.
        if web_acl['Rules']:
            remove_rules_from_web_acl(client, module, web_acl_id)
        try:
            run_func_with_change_token_backoff(client, module, {'WebACLId': web_acl_id}, client.delete_web_acl, wait=True)
            return True, {}
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not delete Web ACL')
    return False, {}
def main():
    """Entry point for the aws_waf_web_acl module."""
    argument_spec = dict(
        name=dict(required=True),
        default_action=dict(choices=['block', 'allow', 'count']),
        metric_name=dict(),
        state=dict(default='present', choices=['present', 'absent']),
        rules=dict(type='list'),
        purge_rules=dict(type='bool', default=False),
        waf_regional=dict(type='bool', default=False)
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[['state', 'present', ['default_action', 'rules']]])

    # Choose the classic or regional WAF endpoint.
    resource = 'waf-regional' if module.params['waf_regional'] else 'waf'
    client = module.client(resource)

    if module.params.get('state') == 'present':
        changed, results = ensure_web_acl_present(client, module)
    else:
        changed, results = ensure_web_acl_absent(client, module)

    module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results))


if __name__ == '__main__':
    main()

@ -1,87 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: cloudformation_exports_info
short_description: Read a value from CloudFormation Exports
description:
- Module retrieves a value from CloudFormation Exports
requirements: ['boto3 >= 1.11.15']
version_added: "2.10"
author:
- "Michael Moyle (@mmoyle)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Get Exports
cloudformation_exports_info:
profile: 'my_aws_profile'
region: 'my_region'
register: cf_exports
- debug:
msg: "{{ cf_exports }}"
'''
RETURN = '''
export_items:
description: A dictionary of Exports items names and values.
returned: Always
type: dict
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import AWSRetry
try:
from botocore.exceptions import ClientError
from botocore.exceptions import BotoCoreError
except ImportError:
pass # handled by AnsibleAWSModule
@AWSRetry.exponential_backoff()
def list_exports(cloudformation_client):
    '''Return a dict mapping every CloudFormation export name to its value.'''
    paginator = cloudformation_client.get_paginator('list_exports')
    exports = paginator.paginate().build_full_result()['Exports']
    return dict((item['Name'], item['Value']) for item in exports)
def main():
    """Entry point: collect CloudFormation export names/values and exit.

    Fix: the previous implementation called ``result.update()`` with no
    arguments just before exiting — a no-op that has been removed.
    """
    argument_spec = dict()
    result = dict(
        changed=False,
        original_message=''
    )
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
    cloudformation_client = module.client('cloudformation')

    try:
        result['export_items'] = list_exports(cloudformation_client)
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e)

    module.exit_json(**result)


if __name__ == '__main__':
    main()

@ -1,724 +0,0 @@
#!/usr/bin/python
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudformation_stack_set
short_description: Manage groups of CloudFormation stacks
description:
- Launches/updates/deletes AWS CloudFormation Stack Sets.
notes:
- To make an individual stack, you want the M(cloudformation) module.
version_added: "2.7"
options:
name:
description:
- Name of the CloudFormation stack set.
required: true
type: str
description:
description:
- A description of what this stack set creates.
type: str
parameters:
description:
- A list of hashes of all the template variables for the stack. The value can be a string or a dict.
- Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
default: {}
type: dict
state:
description:
- If I(state=present), stack will be created. If I(state=present) and if stack exists and template has changed, it will be updated.
If I(state=absent), stack will be removed.
default: present
choices: [ present, absent ]
type: str
template:
description:
- The local path of the CloudFormation template.
- This must be the full path to the file, relative to the working directory. If using roles this may look
like C(roles/cloudformation/files/cloudformation-example.json).
- If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
must be specified (but only one of them).
- If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
are specified, the previous template will be reused.
type: path
template_body:
description:
- Template body. Use this to pass in the actual body of the CloudFormation template.
- If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
must be specified (but only one of them).
- If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
are specified, the previous template will be reused.
type: str
template_url:
description:
- Location of file containing the template body.
- The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region
as the stack.
- If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
must be specified (but only one of them).
- If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
are specified, the previous template will be reused.
type: str
purge_stacks:
description:
- Only applicable when I(state=absent). Sets whether, when deleting a stack set, the stack instances should also be deleted.
- By default, instances will be deleted. To keep stacks when stack set is deleted set I(purge_stacks=false).
type: bool
default: true
wait:
description:
- Whether or not to wait for stack operation to complete. This includes waiting for stack instances to reach UPDATE_COMPLETE status.
- If you choose not to wait, this module will not notify when stack operations fail because it will not wait for them to finish.
type: bool
default: false
wait_timeout:
description:
- How long to wait (in seconds) for stacks to complete create/update/delete operations.
default: 900
type: int
capabilities:
description:
- Capabilities allow stacks to create and modify IAM resources, which may include adding users or roles.
- Currently the only available values are 'CAPABILITY_IAM' and 'CAPABILITY_NAMED_IAM'. Either or both may be provided.
- >
The following resources require that one or both of these parameters is specified: AWS::IAM::AccessKey,
AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, AWS::IAM::UserToGroupAddition
type: list
elements: str
choices:
- 'CAPABILITY_IAM'
- 'CAPABILITY_NAMED_IAM'
regions:
description:
- A list of AWS regions to create instances of a stack in. The I(region) parameter chooses where the Stack Set is created, and I(regions)
specifies the region for stack instances.
- At least one region must be specified to create a stack set. On updates, if fewer regions are specified only the specified regions will
have their stack instances updated.
type: list
elements: str
accounts:
description:
    - A list of AWS accounts in which to create instances of CloudFormation stacks.
    - At least one account must be specified to create stack instances. On updates, if fewer accounts are specified only the specified accounts will
      have their stack instances updated.
type: list
elements: str
administration_role_arn:
description:
- ARN of the administration role, meaning the role that CloudFormation Stack Sets use to assume the roles in your child accounts.
- This defaults to C(arn:aws:iam::{{ account ID }}:role/AWSCloudFormationStackSetAdministrationRole) where C({{ account ID }}) is replaced with the
account number of the current IAM role/user/STS credentials.
aliases:
- admin_role_arn
- admin_role
- administration_role
type: str
execution_role_name:
description:
    - Name of the execution role, meaning the role that CloudFormation Stack Sets assumes in your child accounts.
- This MUST NOT be an ARN, and the roles must exist in each child account specified.
- The default name for the execution role is C(AWSCloudFormationStackSetExecutionRole)
aliases:
- exec_role_name
- exec_role
- execution_role
type: str
tags:
description:
- Dictionary of tags to associate with stack and its resources during stack creation.
- Can be updated later, updating tags removes previous entries.
type: dict
failure_tolerance:
description:
- Settings to change what is considered "failed" when running stack instance updates, and how many to do at a time.
type: dict
suboptions:
fail_count:
description:
- The number of accounts, per region, for which this operation can fail before CloudFormation
stops the operation in that region.
- You must specify one of I(fail_count) and I(fail_percentage).
type: int
fail_percentage:
type: int
description:
- The percentage of accounts, per region, for which this stack operation can fail before CloudFormation
stops the operation in that region.
- You must specify one of I(fail_count) and I(fail_percentage).
parallel_percentage:
type: int
description:
- The maximum percentage of accounts in which to perform this operation at one time.
- You must specify one of I(parallel_count) and I(parallel_percentage).
- Note that this setting lets you specify the maximum for operations.
For large deployments, under certain circumstances the actual percentage may be lower.
parallel_count:
type: int
description:
- The maximum number of accounts in which to perform this operation at one time.
- I(parallel_count) may be at most one more than the I(fail_count).
- You must specify one of I(parallel_count) and I(parallel_percentage).
- Note that this setting lets you specify the maximum for operations.
For large deployments, under certain circumstances the actual count may be lower.
author: "Ryan Scott Brown (@ryansb)"
extends_documentation_fragment:
- aws
- ec2
requirements: [ boto3>=1.6, botocore>=1.10.26 ]
'''
EXAMPLES = '''
- name: Create a stack set with instances in two accounts
cloudformation_stack_set:
name: my-stack
description: Test stack in two accounts
state: present
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
accounts: [1234567890, 2345678901]
regions:
- us-east-1
- name: on subsequent calls, templates are optional but parameters and tags can be altered
cloudformation_stack_set:
name: my-stack
state: present
parameters:
InstanceName: my_stacked_instance
tags:
foo: bar
test: stack
accounts: [1234567890, 2345678901]
regions:
- us-east-1
- name: The same type of update, but wait for the update to complete in all stacks
cloudformation_stack_set:
name: my-stack
state: present
wait: true
parameters:
InstanceName: my_restacked_instance
tags:
foo: bar
test: stack
accounts: [1234567890, 2345678901]
regions:
- us-east-1
'''
RETURN = '''
operations_log:
type: list
description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
returned: always
sample:
- action: CREATE
creation_timestamp: '2018-06-18T17:40:46.372000+00:00'
end_timestamp: '2018-06-18T17:41:24.560000+00:00'
operation_id: Ansible-StackInstance-Create-0ff2af5b-251d-4fdb-8b89-1ee444eba8b8
status: FAILED
stack_instances:
- account: '1234567890'
region: us-east-1
stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
status: OUTDATED
status_reason: Account 1234567890 should have 'AWSCloudFormationStackSetAdministrationRole' role with trust relationship to CloudFormation service.
operations:
description: All operations initiated by this run of the cloudformation_stack_set module
returned: always
type: list
sample:
- action: CREATE
administration_role_arn: arn:aws:iam::1234567890:role/AWSCloudFormationStackSetAdministrationRole
creation_timestamp: '2018-06-18T17:40:46.372000+00:00'
end_timestamp: '2018-06-18T17:41:24.560000+00:00'
execution_role_name: AWSCloudFormationStackSetExecutionRole
operation_id: Ansible-StackInstance-Create-0ff2af5b-251d-4fdb-8b89-1ee444eba8b8
operation_preferences:
region_order:
- us-east-1
- us-east-2
stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
status: FAILED
stack_instances:
description: CloudFormation stack instances that are members of this stack set. This will also include their region and account ID.
returned: state == present
type: list
sample:
- account: '1234567890'
region: us-east-1
stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
status: OUTDATED
status_reason: >
Account 1234567890 should have 'AWSCloudFormationStackSetAdministrationRole' role with trust relationship to CloudFormation service.
- account: '1234567890'
region: us-east-2
stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
status: OUTDATED
status_reason: Cancelled since failure tolerance has exceeded
stack_set:
type: dict
description: Facts about the currently deployed stack set, its parameters, and its tags
returned: state == present
sample:
administration_role_arn: arn:aws:iam::1234567890:role/AWSCloudFormationStackSetAdministrationRole
capabilities: []
description: test stack PRIME
execution_role_name: AWSCloudFormationStackSetExecutionRole
parameters: []
    stack_set_arn: arn:aws:cloudformation:us-east-1:1234567890:stackset/TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
stack_set_name: TestStackPrime
status: ACTIVE
tags:
Some: Thing
an: other
template_body: |
AWSTemplateFormatVersion: "2010-09-09"
Parameters: {}
Resources:
Bukkit:
Type: "AWS::S3::Bucket"
Properties: {}
other:
Type: "AWS::SNS::Topic"
Properties: {}
''' # NOQA
import time
import datetime
import uuid
import itertools
try:
import boto3
import botocore.exceptions
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
# handled by AnsibleAWSModule
pass
from ansible.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, camel_dict_to_snake_dict
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils._text import to_native
def create_stack_set(module, stack_params, cfn):
    """Create the stack set described by stack_params and block until AWS
    reports it exists; any AWS failure aborts the module run."""
    # Build the failure message up front; .get() avoids a KeyError if the
    # name is somehow absent when reporting the error.
    failure_msg = "Failed to create stack set {0}.".format(stack_params.get('StackSetName'))
    try:
        cfn.create_stack_set(aws_retry=True, **stack_params)
        return await_stack_set_exists(cfn, stack_params['StackSetName'])
    except (ClientError, BotoCoreError) as err:
        module.fail_json_aws(err, msg=failure_msg)
def update_stack_set(module, stack_params, cfn):
    """Update an existing stack set.

    If the state is present and the stack already exists, we try to update it.
    AWS will tell us if the stack template and parameters are the same and
    don't need to be updated.

    Returns True once an update operation has been submitted (optionally
    after waiting for it); all failures are reported via fail_json_aws.
    """
    try:
        # aws_retry=True engages the retry decorator this client was built
        # with, matching create_stack_set() and compare_stack_instances();
        # without it, throttling errors failed immediately.
        cfn.update_stack_set(aws_retry=True, **stack_params)
    except is_boto3_error_code('StackSetNotFound') as err:  # pylint: disable=duplicate-except
        module.fail_json_aws(err, msg="Failed to find stack set. Check the name & region.")
    except is_boto3_error_code('StackInstanceNotFound') as err:  # pylint: disable=duplicate-except
        module.fail_json_aws(err, msg="One or more stack instances were not found for this stack set. Double check "
                                      "the `accounts` and `regions` parameters.")
    except is_boto3_error_code('OperationInProgressException') as err:  # pylint: disable=duplicate-except
        module.fail_json_aws(
            err, msg="Another operation is already in progress on this stack set - please try again later. When making "
                     "multiple cloudformation_stack_set calls, it's best to enable `wait: yes` to avoid unfinished op errors.")
    except (ClientError, BotoCoreError) as err:  # pylint: disable=duplicate-except
        module.fail_json_aws(err, msg="Could not update stack set.")
    if module.params.get('wait'):
        # Block until this operation (and its stack instances) settle.
        await_stack_set_operation(
            module, cfn, operation_id=stack_params['OperationId'],
            stack_set_name=stack_params['StackSetName'],
            max_wait=module.params.get('wait_timeout'),
        )
    return True
def compare_stack_instances(cfn, stack_set_name, accounts, regions):
    """Diff the (account, region) pairs that exist against those requested.

    Returns a 3-tuple of sets:
    (pairs to create, pairs that currently exist, pairs that exist but were
    not requested).
    """
    summaries = cfn.list_stack_instances(
        aws_retry=True,
        StackSetName=stack_set_name,
    )['Summaries']
    desired = set(itertools.product(accounts, regions))
    existing = {(summary['Account'], summary['Region']) for summary in summaries}
    # new stacks, existing stacks, unspecified stacks
    return desired - existing, existing, existing - desired
@AWSRetry.backoff(tries=3, delay=4)
def stack_set_facts(cfn, stack_set_name):
    """Describe a stack set and flatten its Tags into a plain dict.

    Returns None when the stack set does not exist.
    """
    try:
        stack_set = cfn.describe_stack_set(StackSetName=stack_set_name)['StackSet']
    except cfn.exceptions.from_code('StackSetNotFound'):
        # A missing stack set is "no facts", not an error.
        return None
    stack_set['Tags'] = boto3_tag_list_to_ansible_dict(stack_set['Tags'])
    return stack_set
def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wait):
    """Wait up to max_wait seconds for a stack set operation to leave the
    RUNNING/STOPPING states, then spend the remaining time waiting for the
    member stack instances to settle. Emits a warning rather than failing
    when time runs out."""
    wait_start = datetime.datetime.now()
    operation = None
    # Poll every 15 seconds until the operation finishes or time runs out.
    for i in range(max_wait // 15):
        try:
            operation = cfn.describe_stack_set_operation(StackSetName=stack_set_name, OperationId=operation_id)
            if operation['StackSetOperation']['Status'] not in ('RUNNING', 'STOPPING'):
                # Stack set has completed operation
                break
        except is_boto3_error_code('StackSetNotFound'):  # pylint: disable=duplicate-except
            # Set not visible yet (eventual consistency) - keep polling.
            pass
        except is_boto3_error_code('OperationNotFound'):  # pylint: disable=duplicate-except
            # Operation not visible yet - keep polling.
            pass
        time.sleep(15)
    if operation and operation['StackSetOperation']['Status'] not in ('FAILED', 'STOPPED'):
        # Operation ended in a non-terminal-failure state; now wait for the
        # individual stack instances with whatever time is left.
        await_stack_instance_completion(
            module, cfn,
            stack_set_name=stack_set_name,
            # subtract however long we waited already
            max_wait=int(max_wait - (datetime.datetime.now() - wait_start).total_seconds()),
        )
    elif operation and operation['StackSetOperation']['Status'] in ('FAILED', 'STOPPED'):
        # Terminal failure: nothing further to wait for; callers inspect the
        # operation results themselves.
        pass
    else:
        # Timed out (or never managed to describe the operation at all).
        module.warn(
            "Timed out waiting for operation {0} on stack set {1} after {2} seconds. Returning unfinished operation".format(
                operation_id, stack_set_name, max_wait
            )
        )
def await_stack_instance_completion(module, cfn, stack_set_name, max_wait):
    """Poll until no stack instance in the set has a non-CURRENT status, or
    max_wait seconds elapse.

    Returns the instance summaries when everything is CURRENT; on timeout it
    warns and returns None.
    """
    to_await = None
    for i in range(max_wait // 15):
        try:
            stack_instances = cfn.list_stack_instances(StackSetName=stack_set_name)
            to_await = [inst for inst in stack_instances['Summaries']
                        if inst['Status'] != 'CURRENT']
            if not to_await:
                return stack_instances['Summaries']
        except is_boto3_error_code('StackSetNotFound'):  # pylint: disable=duplicate-except
            # this means the deletion beat us, or the stack set is not yet propagated
            pass
        time.sleep(15)
    # `to_await` is still None when the loop never ran (max_wait < 15) or
    # every poll raised StackSetNotFound; guard with `or []` so building the
    # warning does not raise TypeError on joining over None.
    module.warn(
        "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. Returning unfinished operation".format(
            stack_set_name, ', '.join(s['StackId'] for s in (to_await or [])), max_wait
        )
    )
def await_stack_set_exists(cfn, stack_set_name):
    """Describe a stack set and return it snake_cased with Tags flattened.

    AWSRetry will retry on `StackSetNotFound` errors for us (aws_retry=True),
    which is what makes this "await" until the set becomes visible.
    """
    described = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)
    stack_set = described['StackSet']
    stack_set['Tags'] = boto3_tag_list_to_ansible_dict(stack_set['Tags'])
    return camel_dict_to_snake_dict(stack_set, ignore_list=('Tags',))
def describe_stack_tree(module, stack_set_name, operation_ids=None):
    """Collect a full picture of a stack set for the module result.

    Returns a dict with 'stack_set' (snake_cased description with flattened
    tags), 'operations_log' (all operations sorted by creation time),
    'stack_instances' (sorted by region+account) and, when operation_ids are
    given, 'operations' (details of each operation that can still be found).
    """
    jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5, catch_extra_error_codes=['StackSetNotFound'])
    cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)
    result = dict()
    result['stack_set'] = camel_dict_to_snake_dict(
        cfn.describe_stack_set(
            StackSetName=stack_set_name,
            aws_retry=True,
        )['StackSet']
    )
    result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags'])
    result['operations_log'] = sorted(
        camel_dict_to_snake_dict(
            cfn.list_stack_set_operations(
                StackSetName=stack_set_name,
                aws_retry=True,
            )
        )['summaries'],
        key=lambda x: x['creation_timestamp']
    )
    # aws_retry=True added here for consistency with the other calls in this
    # function, so transient throttling / StackSetNotFound errors are retried.
    result['stack_instances'] = sorted(
        [
            camel_dict_to_snake_dict(i) for i in
            cfn.list_stack_instances(StackSetName=stack_set_name, aws_retry=True)['Summaries']
        ],
        key=lambda i: i['region'] + i['account']
    )
    if operation_ids:
        result['operations'] = []
        for op_id in operation_ids:
            try:
                result['operations'].append(camel_dict_to_snake_dict(
                    cfn.describe_stack_set_operation(
                        StackSetName=stack_set_name,
                        OperationId=op_id,
                    )['StackSetOperation']
                ))
            except is_boto3_error_code('OperationNotFoundException'):  # pylint: disable=duplicate-except
                # An operation that can no longer be found is simply omitted.
                pass
    return result
def get_operation_preferences(module):
    """Build the CloudFormation OperationPreferences dict from module params.

    Maps the failure_tolerance suboptions onto the API's preference keys and
    includes RegionOrder when regions were supplied.
    """
    params = dict()
    if module.params.get('regions'):
        params['RegionOrder'] = list(module.params['regions'])
    # Tolerate failure_tolerance being absent or explicitly null.
    failure_tolerance = module.params.get('failure_tolerance') or {}
    for param, api_name in {
        'fail_count': 'FailureToleranceCount',
        'fail_percentage': 'FailureTolerancePercentage',
        'parallel_percentage': 'MaxConcurrentPercentage',
        'parallel_count': 'MaxConcurrentCount',
    }.items():
        # Explicit None check: 0 is a valid setting (e.g. fail_count=0 means
        # "tolerate no failures") and must not be dropped by truthiness.
        if failure_tolerance.get(param) is not None:
            params[api_name] = failure_tolerance.get(param)
    return params
def main():
    """Module entry point.

    Builds the argument spec, reconciles the stack set itself
    (create/update/delete), then reconciles its per-account/per-region stack
    instances, optionally waiting for the resulting CloudFormation
    operations to finish before reporting results.
    """
    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=900),
        state=dict(default='present', choices=['present', 'absent']),
        purge_stacks=dict(type='bool', default=True),
        parameters=dict(type='dict', default={}),
        template=dict(type='path'),
        template_url=dict(),
        template_body=dict(),
        capabilities=dict(type='list', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
        regions=dict(type='list'),
        accounts=dict(type='list'),
        failure_tolerance=dict(
            type='dict',
            default={},
            options=dict(
                fail_count=dict(type='int'),
                fail_percentage=dict(type='int'),
                parallel_percentage=dict(type='int'),
                parallel_count=dict(type='int'),
            ),
            mutually_exclusive=[
                ['fail_count', 'fail_percentage'],
                ['parallel_count', 'parallel_percentage'],
            ],
        ),
        administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']),
        execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']),
        tags=dict(type='dict'),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True
    )
    if not (module.boto3_at_least('1.6.0') and module.botocore_at_least('1.10.26')):
        module.fail_json(msg="Boto3 or botocore version is too low. This module requires at least boto3 1.6 and botocore 1.10.26")
    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30, catch_extra_error_codes=['StackSetNotFound'])
    cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)
    existing_stack_set = stack_set_facts(cfn, module.params['name'])
    # One UUID per run: every CloudFormation operation this run submits is
    # tagged with it so the operations can be found again afterwards.
    operation_uuid = to_native(uuid.uuid4())
    operation_ids = []
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {}
    state = module.params['state']
    if state == 'present' and not module.params['accounts']:
        module.fail_json(
            msg="Can't create a stack set without choosing at least one account. "
                "To get the ID of the current account, use the aws_caller_info module."
        )
    # NOTE(review): 'accounts' defaults to None, so state=absent without
    # accounts makes this comprehension raise TypeError - confirm whether
    # absent is also expected to require 'accounts'.
    module.params['accounts'] = [to_native(a) for a in module.params['accounts']]
    stack_params['StackSetName'] = module.params['name']
    if module.params.get('description'):
        stack_params['Description'] = module.params['description']
    if module.params.get('capabilities'):
        stack_params['Capabilities'] = module.params['capabilities']
    # Exactly one template source is used; with none given, fall back to the
    # template already stored on an existing stack set.
    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as tpl:
            stack_params['TemplateBody'] = tpl.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    else:
        # no template is provided, but if the stack set exists already, we can use the existing one.
        if existing_stack_set:
            stack_params['UsePreviousTemplate'] = True
        else:
            module.fail_json(
                msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
                    "`template_body`, or `template_url`".format(module.params['name'])
            )
    stack_params['Parameters'] = []
    for k, v in module.params['parameters'].items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)
            if 'value' in v:
                param['ParameterValue'] = to_native(v['value'])
            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)
            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
    if module.params.get('tags') and isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])
    if module.params.get('administration_role_arn'):
        # TODO loosen the semantics here to autodetect the account ID and build the ARN
        stack_params['AdministrationRoleARN'] = module.params['administration_role_arn']
    if module.params.get('execution_role_name'):
        stack_params['ExecutionRoleName'] = module.params['execution_role_name']
    result = {}
    if module.check_mode:
        if state == 'absent' and existing_stack_set:
            module.exit_json(changed=True, msg='Stack set would be deleted', meta=[])
        elif state == 'absent' and not existing_stack_set:
            module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[])
        elif state == 'present' and not existing_stack_set:
            module.exit_json(changed=True, msg='New stack set would be created', meta=[])
        elif state == 'present' and existing_stack_set:
            new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
                cfn,
                module.params['name'],
                module.params['accounts'],
                module.params['regions'],
            )
            if new_stacks:
                module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[])
            # NOTE(review): 'purge_stack_instances' is not declared in
            # argument_spec (only 'purge_stacks' is), so this .get() always
            # returns None and the branch below can never fire - looks like a
            # typo for 'purge_stacks'; confirm intended behaviour before fixing.
            elif unspecified_stacks and module.params.get('purge_stack_instances'):
                module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[])
        else:
            # TODO: need to check the template and other settings for correct check mode
            module.exit_json(changed=False, msg='No changes detected', meta=[])
    changed = False
    if state == 'present':
        if not existing_stack_set:
            # on create this parameter has a different name, and cannot be referenced later in the job log
            stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid)
            changed = True
            create_stack_set(module, stack_params, cfn)
        else:
            stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid)
            operation_ids.append(stack_params['OperationId'])
            if module.params.get('regions'):
                stack_params['OperationPreferences'] = get_operation_preferences(module)
            changed |= update_stack_set(module, stack_params, cfn)
        # now create/update any appropriate stack instances
        new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
            cfn,
            module.params['name'],
            module.params['accounts'],
            module.params['regions'],
        )
        if new_stack_instances:
            operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid))
            changed = True
            cfn.create_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct for acct, region in new_stack_instances)),
                Regions=list(set(region for acct, region in new_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        else:
            # No new instances: run an update across the ones that exist.
            operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid))
            cfn.update_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct for acct, region in existing_stack_instances)),
                Regions=list(set(region for acct, region in existing_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        # Wait for every operation this run submitted (full wait_timeout per
        # operation, not shared across them).
        for op in operation_ids:
            await_stack_set_operation(
                module, cfn, operation_id=op,
                stack_set_name=module.params['name'],
                max_wait=module.params.get('wait_timeout'),
            )
    elif state == 'absent':
        if not existing_stack_set:
            module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name']))
        # NOTE(review): 'purge_stack_instances' is not in argument_spec, so
        # this is always `None is False` -> False and the branch is dead code;
        # probably meant 'purge_stacks'.
        if module.params.get('purge_stack_instances') is False:
            pass
        try:
            cfn.delete_stack_set(
                StackSetName=module.params['name'],
            )
            module.exit_json(msg='Stack set {0} deleted'.format(module.params['name']))
        except is_boto3_error_code('OperationInProgressException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name']))
        except is_boto3_error_code('StackSetNotEmptyException'):  # pylint: disable=duplicate-except
            # The set still has instances: delete them (optionally retaining
            # the underlying stacks), wait, then retry the stack set delete.
            delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid)
            cfn.delete_stack_instances(
                StackSetName=module.params['name'],
                Accounts=module.params['accounts'],
                Regions=module.params['regions'],
                RetainStacks=(not module.params.get('purge_stacks')),
                OperationId=delete_instances_op
            )
            await_stack_set_operation(
                module, cfn, operation_id=delete_instances_op,
                stack_set_name=stack_params['StackSetName'],
                max_wait=module.params.get('wait_timeout'),
            )
            try:
                cfn.delete_stack_set(
                    StackSetName=module.params['name'],
                )
            except is_boto3_error_code('StackSetNotEmptyException') as exc:  # pylint: disable=duplicate-except
                # this time, it is likely that either the delete failed or there are more stacks.
                instances = cfn.list_stack_instances(
                    StackSetName=module.params['name'],
                )
                stack_states = ', '.join('(account={Account}, region={Region}, state={Status})'.format(**i) for i in instances['Summaries'])
                module.fail_json_aws(exc, msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states)
            module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name']))
    # Only the state=present path reaches here (absent paths exit above).
    result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids))
    if any(o['status'] == 'FAILED' for o in result['operations']):
        module.fail_json(msg="One or more operations failed to execute", **result)
    module.exit_json(changed=changed, **result)


if __name__ == '__main__':
    main()

File diff suppressed because it is too large Load Diff

@ -1,729 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Standard Ansible module metadata consumed by documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudfront_info
short_description: Obtain facts about an AWS CloudFront distribution
description:
- Gets information about an AWS CloudFront distribution.
- This module was called C(cloudfront_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(cloudfront_info) module no longer returns C(ansible_facts)!
requirements:
- boto3 >= 1.0.0
- python >= 2.6
version_added: "2.3"
author: Willem van Ketwich (@wilvk)
options:
distribution_id:
description:
- The id of the CloudFront distribution. Used with I(distribution), I(distribution_config),
I(invalidation), I(streaming_distribution), I(streaming_distribution_config), I(list_invalidations).
required: false
type: str
invalidation_id:
description:
- The id of the invalidation to get information about.
- Used with I(invalidation).
required: false
type: str
origin_access_identity_id:
description:
- The id of the CloudFront origin access identity to get information about.
required: false
type: str
# web_acl_id:
# description:
# - Used with I(list_distributions_by_web_acl_id).
# required: false
# type: str
domain_name_alias:
description:
- Can be used instead of I(distribution_id) - uses the aliased CNAME for the CloudFront
distribution to get the distribution id where required.
required: false
type: str
all_lists:
description:
- Get all CloudFront lists that do not require parameters.
required: false
default: false
type: bool
origin_access_identity:
description:
- Get information about an origin access identity.
- Requires I(origin_access_identity_id) to be specified.
required: false
default: false
type: bool
origin_access_identity_config:
description:
- Get the configuration information about an origin access identity.
- Requires I(origin_access_identity_id) to be specified.
required: false
default: false
type: bool
distribution:
description:
- Get information about a distribution.
- Requires I(distribution_id) or I(domain_name_alias) to be specified.
required: false
default: false
type: bool
distribution_config:
description:
- Get the configuration information about a distribution.
- Requires I(distribution_id) or I(domain_name_alias) to be specified.
required: false
default: false
type: bool
invalidation:
description:
- Get information about an invalidation.
- Requires I(invalidation_id) to be specified.
required: false
default: false
type: bool
streaming_distribution:
description:
- Get information about a specified RTMP distribution.
- Requires I(distribution_id) or I(domain_name_alias) to be specified.
required: false
default: false
type: bool
streaming_distribution_config:
description:
- Get the configuration information about a specified RTMP distribution.
- Requires I(distribution_id) or I(domain_name_alias) to be specified.
required: false
default: false
type: bool
list_origin_access_identities:
    description:
      - Get a list of CloudFront origin access identities.
required: false
default: false
type: bool
list_distributions:
description:
- Get a list of CloudFront distributions.
required: false
default: false
type: bool
list_distributions_by_web_acl_id:
description:
- Get a list of distributions using web acl id as a filter.
- Requires I(web_acl_id) to be set.
required: false
default: false
type: bool
list_invalidations:
description:
- Get a list of invalidations.
- Requires I(distribution_id) or I(domain_name_alias) to be specified.
required: false
default: false
type: bool
list_streaming_distributions:
description:
- Get a list of streaming distributions.
required: false
default: false
type: bool
summary:
description:
- Returns a summary of all distributions, streaming distributions and origin_access_identities.
- This is the default behaviour if no option is selected.
required: false
default: false
type: bool
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Get a summary of distributions
- cloudfront_info:
summary: true
register: result
# Get information about a distribution
- cloudfront_info:
distribution: true
distribution_id: my-cloudfront-distribution-id
register: result_did
- debug:
msg: "{{ result_did['cloudfront']['my-cloudfront-distribution-id'] }}"
# Get information about a distribution using the CNAME of the cloudfront distribution.
- cloudfront_info:
distribution: true
domain_name_alias: www.my-website.com
register: result_website
- debug:
msg: "{{ result_website['cloudfront']['www.my-website.com'] }}"
# When the module is called as cloudfront_facts, return values are published
# in ansible_facts['cloudfront'][<id>] and can be used as follows.
# Note that this is deprecated and will stop working in Ansible 2.13.
- cloudfront_facts:
distribution: true
distribution_id: my-cloudfront-distribution-id
- debug:
msg: "{{ ansible_facts['cloudfront']['my-cloudfront-distribution-id'] }}"
- cloudfront_facts:
distribution: true
domain_name_alias: www.my-website.com
- debug:
msg: "{{ ansible_facts['cloudfront']['www.my-website.com'] }}"
# Get all information about an invalidation for a distribution.
- cloudfront_facts:
invalidation: true
distribution_id: my-cloudfront-distribution-id
invalidation_id: my-cloudfront-invalidation-id
# Get all information about a CloudFront origin access identity.
- cloudfront_facts:
    origin_access_identity: true
    origin_access_identity_id: my-cloudfront-origin-access-identity-id
# Get all information about lists not requiring parameters (ie. list_origin_access_identities, list_distributions, list_streaming_distributions)
- cloudfront_facts:
all_lists: true
'''
RETURN = '''
origin_access_identity:
description: Describes the origin access identity information. Requires I(origin_access_identity_id) to be set.
returned: only if I(origin_access_identity) is true
type: dict
origin_access_identity_configuration:
description: Describes the origin access identity information configuration information. Requires I(origin_access_identity_id) to be set.
returned: only if I(origin_access_identity_configuration) is true
type: dict
distribution:
  description: >
    Facts about a CloudFront distribution. Requires I(distribution_id) or I(domain_name_alias)
    to be specified.
returned: only if distribution is true
type: dict
distribution_config:
description: >
Facts about a CloudFront distribution's config. Requires I(distribution_id) or I(domain_name_alias)
to be specified.
returned: only if I(distribution_config) is true
type: dict
invalidation:
description: >
Describes the invalidation information for the distribution. Requires
I(invalidation_id) to be specified and either I(distribution_id) or I(domain_name_alias.)
returned: only if invalidation is true
type: dict
streaming_distribution:
description: >
Describes the streaming information for the distribution. Requires
I(distribution_id) or I(domain_name_alias) to be specified.
returned: only if I(streaming_distribution) is true
type: dict
streaming_distribution_config:
description: >
Describes the streaming configuration information for the distribution.
Requires I(distribution_id) or I(domain_name_alias) to be specified.
returned: only if I(streaming_distribution_config) is true
type: dict
summary:
description: Gives a summary of distributions, streaming distributions and origin access identities.
returned: as default or if summary is true
type: dict
result:
description: >
Result dict not nested under the CloudFront ID to access results of module without the knowledge of that id
as figuring out the DistributionId is usually the reason one uses this module in the first place.
returned: always
type: dict
'''
from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, boto3_conn, HAS_BOTO3
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
from ansible.module_utils.basic import AnsibleModule
from functools import partial
import traceback
try:
import botocore
except ImportError:
pass # will be caught by imported HAS_BOTO3
class CloudFrontServiceManager:
"""Handles CloudFront Services"""
    def __init__(self, module):
        """Build a boto3 CloudFront client from the module's AWS connection settings."""
        self.module = module
        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            self.client = boto3_conn(module, conn_type='client',
                                     resource='cloudfront', region=region,
                                     endpoint=ec2_url, **aws_connect_kwargs)
        except botocore.exceptions.NoRegionError:
            # No region could be resolved from parameters, environment or boto config.
            self.module.fail_json(msg="Region must be specified as a parameter, in AWS_DEFAULT_REGION "
                                      "environment variable or in boto configuration file")
        except botocore.exceptions.ClientError as e:
            # Surface connection/credential problems with the AWS error details attached.
            self.module.fail_json(msg="Can't establish connection - " + str(e),
                                  exception=traceback.format_exc(),
                                  **camel_dict_to_snake_dict(e.response))
def get_distribution(self, distribution_id):
try:
func = partial(self.client.get_distribution, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing distribution - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_distribution_config(self, distribution_id):
try:
func = partial(self.client.get_distribution_config, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing distribution configuration - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_origin_access_identity(self, origin_access_identity_id):
try:
func = partial(self.client.get_cloud_front_origin_access_identity, Id=origin_access_identity_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing origin access identity - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_origin_access_identity_config(self, origin_access_identity_id):
try:
func = partial(self.client.get_cloud_front_origin_access_identity_config, Id=origin_access_identity_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing origin access identity configuration - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_invalidation(self, distribution_id, invalidation_id):
try:
func = partial(self.client.get_invalidation, DistributionId=distribution_id, Id=invalidation_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing invalidation - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_streaming_distribution(self, distribution_id):
try:
func = partial(self.client.get_streaming_distribution, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_streaming_distribution_config(self, distribution_id):
try:
func = partial(self.client.get_streaming_distribution_config, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def list_origin_access_identities(self):
    """Return all origin access identity records, or {} when none exist."""
    try:
        api_call = partial(self.client.list_cloud_front_origin_access_identities)
        container = self.paginated_response(api_call, 'CloudFrontOriginAccessIdentityList')
        if container['Quantity'] > 0:
            return container['Items']
        return {}
    except botocore.exceptions.ClientError as e:
        self.module.fail_json(msg="Error listing cloud front origin access identities - " + str(e),
                              exception=traceback.format_exc(),
                              **camel_dict_to_snake_dict(e.response))
def list_distributions(self, keyed=True):
    """List all CloudFront distributions.

    Returns {} when there are none; otherwise the raw item list, or a
    dict keyed by distribution id and aliases when 'keyed' is true.
    """
    try:
        api_call = partial(self.client.list_distributions)
        container = self.paginated_response(api_call, 'DistributionList')
        if container['Quantity'] == 0:
            return {}
        items = container['Items']
        if keyed:
            return self.keyed_list_helper(items)
        return items
    except botocore.exceptions.ClientError as e:
        self.module.fail_json(msg="Error listing distributions - " + str(e),
                              exception=traceback.format_exc(),
                              **camel_dict_to_snake_dict(e.response))
def list_distributions_by_web_acl_id(self, web_acl_id):
    """List distributions attached to a web ACL, keyed by id and aliases.

    Returns {} when the web ACL has no associated distributions.
    """
    try:
        # BUGFIX: the boto3 keyword is 'WebACLId' (capital ACL); passing
        # 'WebAclId' raises a ParamValidationError before any API call is made.
        func = partial(self.client.list_distributions_by_web_acl_id, WebACLId=web_acl_id)
        distribution_list = self.paginated_response(func, 'DistributionList')
        if distribution_list['Quantity'] == 0:
            return {}
        else:
            distribution_list = distribution_list['Items']
        return self.keyed_list_helper(distribution_list)
    except botocore.exceptions.ClientError as e:
        self.module.fail_json(msg="Error listing distributions by web acl id - " + str(e),
                              exception=traceback.format_exc(),
                              **camel_dict_to_snake_dict(e.response))
def list_invalidations(self, distribution_id):
    """Return all invalidation records for a distribution, or {} when none exist."""
    try:
        api_call = partial(self.client.list_invalidations, DistributionId=distribution_id)
        container = self.paginated_response(api_call, 'InvalidationList')
        if container['Quantity'] == 0:
            return {}
        return container['Items']
    except botocore.exceptions.ClientError as e:
        self.module.fail_json(msg="Error listing invalidations - " + str(e),
                              exception=traceback.format_exc(),
                              **camel_dict_to_snake_dict(e.response))
def list_streaming_distributions(self, keyed=True):
    """List all CloudFront streaming distributions.

    Returns {} when there are none; otherwise the raw item list, or a
    dict keyed by distribution id and aliases when 'keyed' is true.
    """
    try:
        api_call = partial(self.client.list_streaming_distributions)
        container = self.paginated_response(api_call, 'StreamingDistributionList')
        if container['Quantity'] == 0:
            return {}
        items = container['Items']
        if keyed:
            return self.keyed_list_helper(items)
        return items
    except botocore.exceptions.ClientError as e:
        self.module.fail_json(msg="Error listing streaming distributions - " + str(e),
                              exception=traceback.format_exc(),
                              **camel_dict_to_snake_dict(e.response))
def summary(self):
    """Aggregate the summaries of distributions, streaming distributions
    and origin access identities into a single dict."""
    combined = {}
    for part in (self.summary_get_distribution_list(False),
                 self.summary_get_distribution_list(True),
                 self.summary_get_origin_access_identity_list()):
        combined.update(part)
    return combined
def summary_get_origin_access_identity_list(self):
    """Return {'origin_access_identities': [{'Id': ..., 'ETag': ...}, ...]}.

    Requires one extra get call per identity to obtain its ETag.
    """
    try:
        summaries = []
        for oai in self.list_origin_access_identities():
            full_record = self.get_origin_access_identity(oai['Id'])
            summaries.append({'Id': oai['Id'], 'ETag': full_record['ETag']})
        return {'origin_access_identities': summaries}
    except botocore.exceptions.ClientError as e:
        self.module.fail_json(msg="Error generating summary of origin access identities - " + str(e),
                              exception=traceback.format_exc(),
                              **camel_dict_to_snake_dict(e.response))
def summary_get_distribution_list(self, streaming=False):
    """Build a condensed summary of all (streaming) distributions.

    Returns a dict with a single key ('streaming_distributions' or
    'distributions') mapping to a list of dicts holding selected
    attributes, aliases, ETag and tags, plus — for web distributions
    only — the WebACLId and any invalidation ids.
    """
    try:
        list_name = 'streaming_distributions' if streaming else 'distributions'
        # Attributes copied verbatim from each distribution record.
        key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
        distribution_list = {list_name: []}
        distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
        for dist in distributions:
            temp_distribution = {}
            for key_name in key_list:
                temp_distribution[key_name] = dist[key_name]
            temp_distribution['Aliases'] = [alias for alias in dist['Aliases'].get('Items', [])]
            # The ETag is not in the list response; needs a per-item get call.
            temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
            if not streaming:
                # Web-distribution-only attributes.
                temp_distribution['WebACLId'] = dist['WebACLId']
                invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
                if invalidation_ids:
                    temp_distribution['Invalidations'] = invalidation_ids
            # Tags are fetched for both web and streaming distributions.
            resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'])
            temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
            distribution_list[list_name].append(temp_distribution)
        return distribution_list
    except botocore.exceptions.ClientError as e:
        self.module.fail_json(msg="Error generating summary of distributions - " + str(e),
                              exception=traceback.format_exc(),
                              **camel_dict_to_snake_dict(e.response))
    except Exception as e:
        # Broad catch: summary assembly dereferences many optional keys.
        self.module.fail_json(msg="Error generating summary of distributions - " + str(e),
                              exception=traceback.format_exc())
def get_etag_from_distribution_id(self, distribution_id, streaming):
    """Return the ETag of a web or streaming distribution."""
    if streaming:
        record = self.get_streaming_distribution(distribution_id)
    else:
        record = self.get_distribution(distribution_id)
    return record['ETag']
def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
    """Return the ids of every invalidation recorded for a distribution."""
    try:
        return [inv['Id'] for inv in self.list_invalidations(distribution_id)]
    except botocore.exceptions.ClientError as e:
        self.module.fail_json(msg="Error getting list of invalidation ids - " + str(e),
                              exception=traceback.format_exc(),
                              **camel_dict_to_snake_dict(e.response))
def get_distribution_id_from_domain_name(self, domain_name):
    """Return the id of the distribution (web or streaming) carrying the
    given alias, compared case-insensitively; '' when no match is found."""
    try:
        distribution_id = ""
        wanted = domain_name.lower()
        candidates = self.list_distributions(False)
        candidates += self.list_streaming_distributions(False)
        for dist in candidates:
            for alias in dist['Aliases'].get('Items', []):
                if str(alias).lower() == wanted:
                    distribution_id = dist['Id']
                    break
        return distribution_id
    except botocore.exceptions.ClientError as e:
        self.module.fail_json(msg="Error getting distribution id from domain name - " + str(e),
                              exception=traceback.format_exc(),
                              **camel_dict_to_snake_dict(e.response))
def get_aliases_from_distribution_id(self, distribution_id):
    """Return the list of aliases attached to the distribution with the
    given id; [] when the id is unknown or has no aliases."""
    try:
        for dist in self.list_distributions(False):
            if dist['Id'] == distribution_id and 'Items' in dist['Aliases']:
                return list(dist['Aliases']['Items'])
        return []
    except botocore.exceptions.ClientError as e:
        self.module.fail_json(msg="Error getting list of aliases from distribution_id - " + str(e),
                              exception=traceback.format_exc(),
                              **camel_dict_to_snake_dict(e.response))
def paginated_response(self, func, result_key=""):
    '''
    Returns expanded response for paginated operations.
    The 'result_key' is used to define the concatenated results that are combined from each paginated response.
    '''
    args = dict()
    results = dict()
    loop = True
    while loop:
        response = func(**args)
        if result_key == "":
            # No key given: merge the whole response (minus metadata).
            result = response
            result.pop('ResponseMetadata', None)
        else:
            # Otherwise merge only the named container (e.g. 'DistributionList').
            result = response.get(result_key)
        # NOTE(review): dict.update() overwrites values from earlier pages,
        # so for result sets spanning multiple pages only the last page's
        # 'Items' survives — looks like a pagination bug; confirm before
        # relying on this for >100 resources.
        results.update(result)
        # The continuation marker may sit at the top level of the response
        # or inside the '...List' container, depending on the API call;
        # prefer the container's marker when one exists.
        args['Marker'] = response.get('NextMarker')
        for key in response.keys():
            if key.endswith('List'):
                args['Marker'] = response[key].get('NextMarker')
                break
        # Stop once no continuation marker was returned.
        loop = args['Marker'] is not None
    return results
def keyed_list_helper(self, list_to_key):
    """Index a list of distribution records by distribution id and by each
    alias; every key maps to the same (shared) record dict."""
    keyed = dict()
    for record in list_to_key:
        for alias in record['Aliases'].get('Items', []):
            keyed[alias] = record
        keyed[record['Id']] = record
    return keyed
def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
    """Merge distribution details into facts under the distribution id, a
    fixed 'result' key, and every alias; returns the (mutated) facts dict."""
    facts[distribution_id].update(details)
    # Fixed key for direct access to the returned details. Note 'result'
    # shares the details object, so the DistributionId added below is also
    # copied into the alias entries updated afterwards (but not into the
    # distribution-id entry, which was updated first).
    facts['result'] = details
    details['DistributionId'] = distribution_id
    for cname in aliases:
        facts[cname].update(details)
    return facts
def main():
    """Entry point for the cloudfront_info / cloudfront_facts module:
    validates options, gathers the requested CloudFront data and exits."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        distribution_id=dict(required=False, type='str'),
        invalidation_id=dict(required=False, type='str'),
        origin_access_identity_id=dict(required=False, type='str'),
        domain_name_alias=dict(required=False, type='str'),
        # BUGFIX: web_acl_id is read from module.params below but was never
        # declared here, so list_distributions_by_web_acl_id could never
        # pass its own validation.
        web_acl_id=dict(required=False, type='str'),
        all_lists=dict(required=False, default=False, type='bool'),
        distribution=dict(required=False, default=False, type='bool'),
        distribution_config=dict(required=False, default=False, type='bool'),
        origin_access_identity=dict(required=False, default=False, type='bool'),
        origin_access_identity_config=dict(required=False, default=False, type='bool'),
        invalidation=dict(required=False, default=False, type='bool'),
        streaming_distribution=dict(required=False, default=False, type='bool'),
        streaming_distribution_config=dict(required=False, default=False, type='bool'),
        list_origin_access_identities=dict(required=False, default=False, type='bool'),
        list_distributions=dict(required=False, default=False, type='bool'),
        list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'),
        list_invalidations=dict(required=False, default=False, type='bool'),
        list_streaming_distributions=dict(required=False, default=False, type='bool'),
        summary=dict(required=False, default=False, type='bool')
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    is_old_facts = module._name == 'cloudfront_facts'
    if is_old_facts:
        module.deprecate("The 'cloudfront_facts' module has been renamed to 'cloudfront_info', "
                         "and the renamed one no longer returns ansible_facts", version='2.13')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    service_mgr = CloudFrontServiceManager(module)

    distribution_id = module.params.get('distribution_id')
    invalidation_id = module.params.get('invalidation_id')
    origin_access_identity_id = module.params.get('origin_access_identity_id')
    web_acl_id = module.params.get('web_acl_id')
    domain_name_alias = module.params.get('domain_name_alias')
    all_lists = module.params.get('all_lists')
    distribution = module.params.get('distribution')
    distribution_config = module.params.get('distribution_config')
    origin_access_identity = module.params.get('origin_access_identity')
    origin_access_identity_config = module.params.get('origin_access_identity_config')
    invalidation = module.params.get('invalidation')
    streaming_distribution = module.params.get('streaming_distribution')
    streaming_distribution_config = module.params.get('streaming_distribution_config')
    list_origin_access_identities = module.params.get('list_origin_access_identities')
    list_distributions = module.params.get('list_distributions')
    list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id')
    list_invalidations = module.params.get('list_invalidations')
    list_streaming_distributions = module.params.get('list_streaming_distributions')
    summary = module.params.get('summary')

    aliases = []
    result = {'cloudfront': {}}
    facts = {}

    # These options all operate on a single distribution.
    require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
                               streaming_distribution_config or list_invalidations)

    # set default to summary if no option specified
    summary = summary or not (distribution or distribution_config or origin_access_identity or
                              origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
                              list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
                              list_streaming_distributions or list_distributions)

    # validations
    if require_distribution_id and distribution_id is None and domain_name_alias is None:
        module.fail_json(msg='Error distribution_id or domain_name_alias have not been specified.')
    if (invalidation and invalidation_id is None):
        module.fail_json(msg='Error invalidation_id has not been specified.')
    if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None:
        module.fail_json(msg='Error origin_access_identity_id has not been specified.')
    if list_distributions_by_web_acl_id and web_acl_id is None:
        module.fail_json(msg='Error web_acl_id has not been specified.')

    # get distribution id from domain name alias
    if require_distribution_id and distribution_id is None:
        distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias)
        if not distribution_id:
            module.fail_json(msg='Error unable to source a distribution id from domain_name_alias')

    # Pre-seed the facts dict so details can be merged under the relevant
    # cloudfront id(s) and aliases below.
    if distribution_id and not list_invalidations:
        facts = {distribution_id: {}}
        aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
        for alias in aliases:
            facts.update({alias: {}})
        if invalidation_id:
            facts.update({invalidation_id: {}})
    elif distribution_id and list_invalidations:
        facts = {distribution_id: {}}
        aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
        for alias in aliases:
            facts.update({alias: {}})
    elif origin_access_identity_id:
        facts = {origin_access_identity_id: {}}
    elif web_acl_id:
        facts = {web_acl_id: {}}

    # get details based on options
    if distribution:
        facts_to_set = service_mgr.get_distribution(distribution_id)
    if distribution_config:
        facts_to_set = service_mgr.get_distribution_config(distribution_id)
    if origin_access_identity:
        facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(origin_access_identity_id))
    if origin_access_identity_config:
        facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(origin_access_identity_id))
    if invalidation:
        facts_to_set = service_mgr.get_invalidation(distribution_id, invalidation_id)
        facts[invalidation_id].update(facts_to_set)
    if streaming_distribution:
        facts_to_set = service_mgr.get_streaming_distribution(distribution_id)
    if streaming_distribution_config:
        facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id)
    if list_invalidations:
        facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)}
    # 'facts_to_set' only exists when one of the distribution-scoped options
    # above ran; merge it under the distribution id and all aliases.
    if 'facts_to_set' in vars():
        facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases)

    # get list based on options
    if all_lists or list_origin_access_identities:
        facts['origin_access_identities'] = service_mgr.list_origin_access_identities()
    if all_lists or list_distributions:
        facts['distributions'] = service_mgr.list_distributions()
    if all_lists or list_streaming_distributions:
        facts['streaming_distributions'] = service_mgr.list_streaming_distributions()
    if list_distributions_by_web_acl_id:
        facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id)
    if list_invalidations:
        facts['invalidations'] = service_mgr.list_invalidations(distribution_id)

    # default summary option
    if summary:
        facts['summary'] = service_mgr.summary()

    result['changed'] = False
    result['cloudfront'].update(facts)
    if is_old_facts:
        module.exit_json(msg="Retrieved CloudFront facts.", ansible_facts=result)
    else:
        module.exit_json(msg="Retrieved CloudFront info.", **result)


if __name__ == '__main__':
    main()

@ -1,276 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudfront_invalidation
short_description: create invalidations for AWS CloudFront distributions
description:
- Allows for invalidation of a batch of paths for a CloudFront distribution.
requirements:
- boto3 >= 1.0.0
- python >= 2.6
version_added: "2.5"
author: Willem van Ketwich (@wilvk)
extends_documentation_fragment:
- aws
- ec2
options:
distribution_id:
description:
- The ID of the CloudFront distribution to invalidate paths for. Can be specified instead of the alias.
required: false
type: str
alias:
description:
- The alias of the CloudFront distribution to invalidate paths for. Can be specified instead of distribution_id.
required: false
type: str
caller_reference:
description:
- A unique reference identifier for the invalidation paths.
- Defaults to current datetime stamp.
required: false
default:
type: str
target_paths:
description:
- A list of paths on the distribution to invalidate. Each path should begin with '/'. Wildcards are allowed. eg. '/foo/bar/*'
required: true
type: list
elements: str
notes:
- does not support check mode
'''
EXAMPLES = '''
- name: create a batch of invalidations using a distribution_id for a reference
cloudfront_invalidation:
distribution_id: E15BU8SDCGSG57
caller_reference: testing 123
target_paths:
- /testpathone/test1.css
- /testpathtwo/test2.js
- /testpaththree/test3.ss
- name: create a batch of invalidations using an alias as a reference and one path using a wildcard match
cloudfront_invalidation:
alias: alias.test.com
caller_reference: testing 123
target_paths:
- /testpathone/test4.css
- /testpathtwo/test5.js
- /testpaththree/*
'''
RETURN = '''
invalidation:
description: The invalidation's information.
returned: always
type: complex
contains:
create_time:
description: The date and time the invalidation request was first made.
returned: always
type: str
sample: '2018-02-01T15:50:41.159000+00:00'
id:
description: The identifier for the invalidation request.
returned: always
type: str
sample: I2G9MOWJZFV612
invalidation_batch:
description: The current invalidation information for the batch request.
returned: always
type: complex
contains:
caller_reference:
description: The value used to uniquely identify an invalidation request.
returned: always
type: str
sample: testing 123
paths:
description: A dict that contains information about the objects that you want to invalidate.
returned: always
type: complex
contains:
items:
description: A list of the paths that you want to invalidate.
returned: always
type: list
sample:
- /testpathtwo/test2.js
- /testpathone/test1.css
- /testpaththree/test3.ss
quantity:
description: The number of objects that you want to invalidate.
returned: always
type: int
sample: 3
status:
description: The status of the invalidation request.
returned: always
type: str
sample: Completed
location:
description: The fully qualified URI of the distribution and invalidation batch request.
returned: always
type: str
sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622
'''
from ansible.module_utils.ec2 import snake_dict_to_camel_dict
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager
import datetime
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by imported AnsibleAWSModule
class CloudFrontInvalidationServiceManager(object):
    """
    Handles CloudFront service calls to AWS for invalidations
    """

    def __init__(self, module):
        self.module = module
        self.client = module.client('cloudfront')

    def create_invalidation(self, distribution_id, invalidation_batch):
        """Create an invalidation batch; returns (response, changed).

        changed is False when CloudFront replays a previous request for the
        same caller reference (idempotent re-run).
        """
        # Look up any existing invalidation with the same caller reference
        # first, so the changed flag can be computed.
        current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference'])
        try:
            response = self.client.create_invalidation(DistributionId=distribution_id, InvalidationBatch=invalidation_batch)
            response.pop('ResponseMetadata', None)
            if current_invalidation_response:
                # CloudFront returned the original request's data: no change.
                return response, False
            else:
                return response, True
        except BotoCoreError as e:
            self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.")
        except ClientError as e:
            # CloudFront rejects a re-used caller reference with a different
            # batch; surface that as a warning rather than a failure.
            if ('Your request contains a caller reference that was used for a previous invalidation batch '
                    'for the same distribution.' in e.response['Error']['Message']):
                self.module.warn("InvalidationBatch target paths are not modifiable. "
                                 "To make a new invalidation please update caller_reference.")
                return current_invalidation_response, False
            else:
                self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.")

    def get_invalidation(self, distribution_id, caller_reference):
        """Return the invalidation carrying the given caller reference, or {}."""
        current_invalidation = {}
        # find all invalidations for the distribution
        try:
            paginator = self.client.get_paginator('list_invalidations')
            invalidations = paginator.paginate(DistributionId=distribution_id).build_full_result().get('InvalidationList', {}).get('Items', [])
            invalidation_ids = [inv['Id'] for inv in invalidations]
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Error listing CloudFront invalidations.")

        # check if there is an invalidation with the same caller reference
        for inv_id in invalidation_ids:
            try:
                invalidation = self.client.get_invalidation(DistributionId=distribution_id, Id=inv_id)['Invalidation']
                caller_ref = invalidation.get('InvalidationBatch', {}).get('CallerReference')
            except (BotoCoreError, ClientError) as e:
                self.module.fail_json_aws(e, msg="Error getting CloudFront invalidation {0}".format(inv_id))
            if caller_ref == caller_reference:
                current_invalidation = invalidation
                break

        current_invalidation.pop('ResponseMetadata', None)
        return current_invalidation
class CloudFrontInvalidationValidationManager(object):
    """
    Manages CloudFront validations for invalidation batches
    """

    def __init__(self, module):
        self.module = module
        self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)

    def validate_distribution_id(self, distribution_id, alias):
        """Resolve and return the distribution id, looking it up from the
        alias when no id was given; fails the module when both are None."""
        try:
            if distribution_id is None and alias is None:
                self.module.fail_json(msg="distribution_id or alias must be specified")
            if distribution_id is None:
                distribution_id = self.__cloudfront_facts_mgr.get_distribution_id_from_domain_name(alias)
            return distribution_id
        except (ClientError, BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Error validating parameters.")

    def create_aws_list(self, invalidation_batch):
        """Wrap a plain path list in the {'Quantity', 'Items'} shape the API expects."""
        aws_list = {}
        aws_list["Quantity"] = len(invalidation_batch)
        aws_list["Items"] = invalidation_batch
        return aws_list

    def validate_invalidation_batch(self, invalidation_batch, caller_reference):
        """Build the snake_case invalidation batch dict.

        caller_reference defaults to the current timestamp so that repeated
        runs without an explicit reference create new invalidations.
        """
        try:
            if caller_reference is not None:
                valid_caller_reference = caller_reference
            else:
                valid_caller_reference = datetime.datetime.now().isoformat()
            valid_invalidation_batch = {
                'paths': self.create_aws_list(invalidation_batch),
                'caller_reference': valid_caller_reference
            }
            return valid_invalidation_batch
        except (ClientError, BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Error validating invalidation batch.")
def main():
    """Module entry point: create (or idempotently re-report) a CloudFront
    invalidation batch for a distribution identified by id or alias."""
    argument_spec = dict(
        caller_reference=dict(),
        distribution_id=dict(),
        alias=dict(),
        # 'elements' added so each target path is validated/coerced to a
        # string, matching the documented 'elements: str'.
        target_paths=dict(required=True, type='list', elements='str')
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']])

    validation_mgr = CloudFrontInvalidationValidationManager(module)
    service_mgr = CloudFrontInvalidationServiceManager(module)

    caller_reference = module.params.get('caller_reference')
    distribution_id = module.params.get('distribution_id')
    alias = module.params.get('alias')
    target_paths = module.params.get('target_paths')

    result = {}

    # Resolve the distribution, normalize the batch, then convert to the
    # PascalCase shape the CloudFront API expects.
    distribution_id = validation_mgr.validate_distribution_id(distribution_id, alias)
    valid_target_paths = validation_mgr.validate_invalidation_batch(target_paths, caller_reference)
    valid_pascal_target_paths = snake_dict_to_camel_dict(valid_target_paths, True)
    result, changed = service_mgr.create_invalidation(distribution_id, valid_pascal_target_paths)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))


if __name__ == '__main__':
    main()

@ -1,280 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudfront_origin_access_identity
short_description: Create, update and delete origin access identities for a
CloudFront distribution
description:
- Allows for easy creation, updating and deletion of origin access
identities.
requirements:
- boto3 >= 1.0.0
- python >= 2.6
version_added: "2.5"
author: Willem van Ketwich (@wilvk)
extends_documentation_fragment:
- aws
- ec2
options:
state:
description: If the named resource should exist.
choices:
- present
- absent
default: present
type: str
origin_access_identity_id:
description:
- The origin_access_identity_id of the CloudFront distribution.
required: false
type: str
comment:
description:
- A comment to describe the CloudFront origin access identity.
required: false
type: str
caller_reference:
description:
- A unique identifier to reference the origin access identity by.
required: false
type: str
notes:
- Does not support check mode.
'''
EXAMPLES = '''
- name: create an origin access identity
cloudfront_origin_access_identity:
state: present
caller_reference: this is an example reference
comment: this is an example comment
- name: update an existing origin access identity using caller_reference as an identifier
cloudfront_origin_access_identity:
origin_access_identity_id: E17DRN9XUOAHZX
caller_reference: this is an example reference
comment: this is a new comment
- name: delete an existing origin access identity using caller_reference as an identifier
cloudfront_origin_access_identity:
state: absent
caller_reference: this is an example reference
comment: this is a new comment
'''
RETURN = '''
cloud_front_origin_access_identity:
description: The origin access identity's information.
returned: always
type: complex
contains:
cloud_front_origin_access_identity_config:
description: describes a url specifying the origin access identity.
returned: always
type: complex
contains:
caller_reference:
description: a caller reference for the oai
returned: always
type: str
comment:
description: a comment describing the oai
returned: always
type: str
id:
description: a unique identifier of the oai
returned: always
type: str
s3_canonical_user_id:
description: the canonical user ID of the user who created the oai
returned: always
type: str
e_tag:
description: The current version of the origin access identity created.
returned: always
type: str
location:
description: The fully qualified URI of the new origin access identity just created.
returned: when initially created
type: str
'''
from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.aws.core import AnsibleAWSModule
import datetime
from functools import partial
import json
import traceback
try:
import botocore
from botocore.signers import CloudFrontSigner
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by imported AnsibleAWSModule
class CloudFrontOriginAccessIdentityServiceManager(object):
    """
    Handles CloudFront origin access identity service calls to aws
    """

    def __init__(self, module):
        self.module = module
        self.client = module.client('cloudfront')

    def create_origin_access_identity(self, caller_reference, comment):
        """Create a new OAI; returns the raw AWS response (ETag, Location, ...)."""
        try:
            return self.client.create_cloud_front_origin_access_identity(
                CloudFrontOriginAccessIdentityConfig={
                    'CallerReference': caller_reference,
                    'Comment': comment
                }
            )
        except (ClientError, BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Error creating cloud front origin access identity.")

    def delete_origin_access_identity(self, origin_access_identity_id, e_tag):
        """Delete an OAI; the current ETag is required as IfMatch."""
        try:
            return self.client.delete_cloud_front_origin_access_identity(Id=origin_access_identity_id, IfMatch=e_tag)
        except (ClientError, BotoCoreError) as e:
            # Fixed copy-pasted message: this is the delete path.
            self.module.fail_json_aws(e, msg="Error deleting Origin Access Identity.")

    def update_origin_access_identity(self, caller_reference, comment, origin_access_identity_id, e_tag):
        """Update an OAI's config; returns (response, changed).

        changed reflects whether the desired config differs from the current
        one. NOTE(review): the update call is issued even when nothing
        changed — CloudFront treats it as a no-op re-send; confirm before
        tightening this.
        """
        changed = False
        new_config = {
            'CallerReference': caller_reference,
            'Comment': comment
        }

        try:
            current_config = self.client.get_cloud_front_origin_access_identity_config(
                Id=origin_access_identity_id)['CloudFrontOriginAccessIdentityConfig']
        except (ClientError, BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Error getting Origin Access Identity config.")

        if new_config != current_config:
            changed = True

        try:
            # If the CallerReference is a value already sent in a previous identity request
            # the returned value is that of the original request
            result = self.client.update_cloud_front_origin_access_identity(
                CloudFrontOriginAccessIdentityConfig=new_config,
                Id=origin_access_identity_id,
                IfMatch=e_tag,
            )
        except (ClientError, BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.")

        return result, changed
class CloudFrontOriginAccessIdentityValidationManager(object):
    """
    Manages CloudFront Origin Access Identities
    """

    def __init__(self, module):
        self.module = module
        self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)

    def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id):
        """Return the current ETag of the OAI, or None when no id was given
        or the lookup produced nothing."""
        try:
            if origin_access_identity_id is None:
                return
            oai = self.__cloudfront_facts_mgr.get_origin_access_identity(origin_access_identity_id)
            if oai is not None:
                return oai.get('ETag')
        except (ClientError, BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.")

    def validate_origin_access_identity_id_from_caller_reference(
            self, caller_reference):
        """Scan all OAIs and return the id of the one whose config carries
        the given caller reference; None when no match is found."""
        try:
            origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities()
            origin_origin_access_identity_ids = [oai.get('Id') for oai in origin_access_identities]
            for origin_access_identity_id in origin_origin_access_identity_ids:
                # One extra config fetch per OAI — the list call does not
                # include the caller reference.
                oai_config = (self.__cloudfront_facts_mgr.get_origin_access_identity_config(origin_access_identity_id))
                temp_caller_reference = oai_config.get('CloudFrontOriginAccessIdentityConfig').get('CallerReference')
                if temp_caller_reference == caller_reference:
                    return origin_access_identity_id
        except (ClientError, BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Error getting Origin Access Identity from caller_reference.")

    def validate_comment(self, comment):
        """Return the comment, defaulting to a timestamped marker when None."""
        if comment is None:
            return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
        return comment
def main():
    """Module entry point: create, update or delete a CloudFront origin
    access identity, resolving it by id or by caller reference."""
    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        origin_access_identity_id=dict(),
        caller_reference=dict(),
        comment=dict(),
    )

    result = {}
    e_tag = None
    changed = False

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
    service_mgr = CloudFrontOriginAccessIdentityServiceManager(module)
    validation_mgr = CloudFrontOriginAccessIdentityValidationManager(module)

    state = module.params.get('state')
    caller_reference = module.params.get('caller_reference')
    comment = module.params.get('comment')
    origin_access_identity_id = module.params.get('origin_access_identity_id')

    # Resolve the OAI id from the caller reference when only the latter was given.
    if origin_access_identity_id is None and caller_reference is not None:
        origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference(caller_reference)

    # e_tag stays None when the OAI does not exist yet; that selects the
    # create path below.
    e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id)
    comment = validation_mgr.validate_comment(comment)

    if state == 'present':
        if origin_access_identity_id is not None and e_tag is not None:
            result, changed = service_mgr.update_origin_access_identity(caller_reference, comment, origin_access_identity_id, e_tag)
        else:
            result = service_mgr.create_origin_access_identity(caller_reference, comment)
            changed = True
    # Absent with an unknown/missing OAI is a no-op (result stays {}).
    elif(state == 'absent' and origin_access_identity_id is not None and
         e_tag is not None):
        result = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag)
        changed = True

    result.pop('ResponseMetadata', None)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))


if __name__ == '__main__':
    main()

@ -1,618 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudtrail
short_description: manage CloudTrail create, delete, update
description:
- Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled.
version_added: "2.0"
author:
- Ansible Core Team
- Ted Timmons (@tedder)
- Daniel Shepherd (@shepdelacreme)
requirements:
- boto3
- botocore
options:
state:
description:
- Add or remove CloudTrail configuration.
- 'The following states have been preserved for backwards compatibility: I(state=enabled) and I(state=disabled).'
      - I(state=enabled) is equivalent to I(state=present).
      - I(state=disabled) is equivalent to I(state=absent).
type: str
choices: ['present', 'absent', 'enabled', 'disabled']
default: present
name:
description:
- Name for the CloudTrail.
- Names are unique per-region unless the CloudTrail is a multi-region trail, in which case it is unique per-account.
type: str
default: default
enable_logging:
description:
- Start or stop the CloudTrail logging. If stopped the trail will be paused and will not record events or deliver log files.
default: true
type: bool
version_added: "2.4"
s3_bucket_name:
description:
- An existing S3 bucket where CloudTrail will deliver log files.
- This bucket should exist and have the proper policy.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html).
- Required when I(state=present).
type: str
version_added: "2.4"
s3_key_prefix:
description:
- S3 Key prefix for delivered log files. A trailing slash is not necessary and will be removed.
type: str
is_multi_region_trail:
description:
- Specify whether the trail belongs only to one region or exists in all regions.
default: false
type: bool
version_added: "2.4"
enable_log_file_validation:
description:
- Specifies whether log file integrity validation is enabled.
- CloudTrail will create a hash for every log file delivered and produce a signed digest file that can be used to ensure log files have not been tampered.
version_added: "2.4"
type: bool
aliases: [ "log_file_validation_enabled" ]
include_global_events:
description:
- Record API calls from global services such as IAM and STS.
default: true
type: bool
aliases: [ "include_global_service_events" ]
sns_topic_name:
description:
- SNS Topic name to send notifications to when a log file is delivered.
version_added: "2.4"
type: str
cloudwatch_logs_role_arn:
description:
- Specifies a full ARN for an IAM role that assigns the proper permissions for CloudTrail to create and write to the log group.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
- Required when C(cloudwatch_logs_log_group_arn).
version_added: "2.4"
type: str
cloudwatch_logs_log_group_arn:
description:
- A full ARN specifying a valid CloudWatch log group to which CloudTrail logs will be delivered. The log group should already exist.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
- Required when C(cloudwatch_logs_role_arn).
type: str
version_added: "2.4"
kms_key_id:
description:
- Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. This also has the effect of enabling log file encryption.
- The value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html).
type: str
version_added: "2.4"
tags:
description:
- A hash/dictionary of tags to be applied to the CloudTrail resource.
- Remove completely or specify an empty dictionary to remove all tags.
default: {}
version_added: "2.4"
type: dict
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: create single region cloudtrail
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
s3_key_prefix: cloudtrail
region: us-east-1
- name: create multi-region trail with validation and tags
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
region: us-east-1
is_multi_region_trail: true
enable_log_file_validation: true
cloudwatch_logs_role_arn: "arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role"
cloudwatch_logs_log_group_arn: "arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*"
kms_key_id: "alias/MyAliasName"
tags:
environment: dev
Name: default
- name: show another valid kms_key_id
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
kms_key_id: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
# simply "12345678-1234-1234-1234-123456789012" would be valid too.
- name: pause logging the trail we just created
cloudtrail:
state: present
name: default
enable_logging: false
s3_bucket_name: mylogbucket
region: us-east-1
is_multi_region_trail: true
enable_log_file_validation: true
tags:
environment: dev
Name: default
- name: delete a trail
cloudtrail:
state: absent
name: default
'''
RETURN = '''
exists:
description: whether the resource exists
returned: always
type: bool
sample: true
trail:
description: CloudTrail resource details
returned: always
type: complex
sample: hash/dictionary of values
contains:
trail_arn:
description: Full ARN of the CloudTrail resource
returned: success
type: str
sample: arn:aws:cloudtrail:us-east-1:123456789012:trail/default
name:
description: Name of the CloudTrail resource
returned: success
type: str
sample: default
is_logging:
description: Whether logging is turned on or paused for the Trail
returned: success
type: bool
sample: True
s3_bucket_name:
description: S3 bucket name where log files are delivered
returned: success
type: str
sample: myBucket
s3_key_prefix:
description: Key prefix in bucket where log files are delivered (if any)
returned: success when present
type: str
sample: myKeyPrefix
log_file_validation_enabled:
description: Whether log file validation is enabled on the trail
returned: success
type: bool
sample: true
include_global_service_events:
description: Whether global services (IAM, STS) are logged with this trail
returned: success
type: bool
sample: true
is_multi_region_trail:
description: Whether the trail applies to all regions or just one
returned: success
type: bool
sample: true
has_custom_event_selectors:
description: Whether any custom event selectors are used for this trail.
returned: success
type: bool
sample: False
home_region:
description: The home region where the trail was originally created and must be edited.
returned: success
type: str
sample: us-east-1
sns_topic_name:
description: The SNS topic name where log delivery notifications are sent.
returned: success when present
type: str
sample: myTopic
sns_topic_arn:
description: Full ARN of the SNS topic where log delivery notifications are sent.
returned: success when present
type: str
sample: arn:aws:sns:us-east-1:123456789012:topic/myTopic
cloud_watch_logs_log_group_arn:
description: Full ARN of the CloudWatch Logs log group where events are delivered.
returned: success when present
type: str
sample: arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*
cloud_watch_logs_role_arn:
description: Full ARN of the IAM role that CloudTrail assumes to deliver events.
returned: success when present
type: str
sample: arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role
kms_key_id:
description: Full ARN of the KMS Key used to encrypt log files.
returned: success when present
type: str
sample: arn:aws:kms::123456789012:key/12345678-1234-1234-1234-123456789012
tags:
description: hash/dictionary of tags applied to this resource
returned: success
type: dict
sample: {'environment': 'dev', 'Name': 'default'}
'''
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (camel_dict_to_snake_dict,
ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict)
def create_trail(module, client, ct_params):
    """Create a CloudTrail trail and return the API response.

    module : AnsibleAWSModule object
    client : boto3 client connection object
    ct_params : The parameters for the Trail to create
    """
    try:
        return client.create_trail(**ct_params)
    except (BotoCoreError, ClientError) as error:
        module.fail_json_aws(error, msg="Failed to create Trail")
    return {}
def tag_trail(module, client, tags, trail_arn, curr_tags=None, dry_run=False):
    """Reconcile tags on a CloudTrail resource; return True if changes are needed.

    module : AnsibleAWSModule object
    client : boto3 client connection object
    tags : Dict of desired tags
    trail_arn : The ARN of the CloudTrail to operate on
    curr_tags : Dict of the current tags on resource, if any
    dry_run : when True, report whether changes are needed without applying them
    """
    if curr_tags is None:
        # Nothing on the resource yet -- everything is an addition.
        adds = ansible_dict_to_boto3_tag_list(tags)
        removes = []
        updates = []
    else:
        desired = set(tags.keys())
        existing = set(curr_tags.keys())
        changed_values = dict()
        for key in existing.intersection(desired):
            if curr_tags[key] != tags[key]:
                changed_values[key] = tags[key]
        adds = get_tag_list(desired - existing, tags)
        removes = get_tag_list(existing - desired, curr_tags)
        updates = get_tag_list(changed_values, tags)

    changed = False
    if removes or updates:
        changed = True
        if not dry_run:
            try:
                # Updated tags must be removed first, then re-added with new values.
                client.remove_tags(ResourceId=trail_arn, TagsList=removes + updates)
            except (BotoCoreError, ClientError) as error:
                module.fail_json_aws(error, msg="Failed to remove tags from Trail")
    if updates or adds:
        changed = True
        if not dry_run:
            try:
                client.add_tags(ResourceId=trail_arn, TagsList=updates + adds)
            except (BotoCoreError, ClientError) as error:
                module.fail_json_aws(error, msg="Failed to add tags to Trail")
    return changed
def get_tag_list(keys, tags):
    """Build a boto3-style tag list (Key/Value dicts) for the given keys.

    keys : iterable of tag keys to include
    tags : dict mapping keys to their values
    """
    return [{'Key': key, 'Value': tags[key]} for key in keys]
def set_logging(module, client, name, action):
    """Start or stop CloudTrail log delivery and return the trail status.

    module : AnsibleAWSModule object
    client : boto3 client connection object
    name : The name or ARN of the CloudTrail to operate on
    action : 'start' or 'stop'
    """
    handlers = {'start': client.start_logging, 'stop': client.stop_logging}
    if action not in handlers:
        module.fail_json(msg="Unsupported logging action")
        return
    try:
        handlers[action](Name=name)
        return client.get_trail_status(Name=name)
    except (BotoCoreError, ClientError) as error:
        msg = "Failed to start logging" if action == 'start' else "Failed to stop logging"
        module.fail_json_aws(error, msg=msg)
def get_trail_facts(module, client, name):
    """Describe an existing trail; return its facts dict, or None if absent.

    module : AnsibleAWSModule object
    client : boto3 client connection object
    name : Name of the trail
    """
    try:
        trail_resp = client.describe_trails(trailNameList=[name])
    except (BotoCoreError, ClientError) as error:
        module.fail_json_aws(error, msg="Failed to describe Trail")

    if not len(trail_resp['trailList']):
        # Trail doesn't exist
        return None

    trail = trail_resp['trailList'][0]
    try:
        status_resp = client.get_trail_status(Name=trail['Name'])
        tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']])
    except (BotoCoreError, ClientError) as error:
        module.fail_json_aws(error, msg="Failed to describe Trail")
    trail['IsLogging'] = status_resp['IsLogging']
    trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList'])
    # Normalise optional response fields so callers can rely on the keys existing.
    for key in ('S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn',
                'CloudWatchLogsRoleArn', 'KmsKeyId'):
        trail.setdefault(key, None)
    return trail
def delete_trail(module, client, trail_arn):
    """Remove a CloudTrail trail identified by its full ARN.

    module : AnsibleAWSModule object
    client : boto3 client connection object
    trail_arn : Full CloudTrail ARN
    """
    try:
        client.delete_trail(Name=trail_arn)
    except (BotoCoreError, ClientError) as error:
        module.fail_json_aws(error, msg="Failed to delete Trail")
def update_trail(module, client, ct_params):
    """
    Update an existing CloudTrail with the given parameters.
    module : AnsibleAWSModule object
    client : boto3 client connection object
    ct_params : The parameters for the Trail to update
    """
    try:
        client.update_trail(**ct_params)
    except (BotoCoreError, ClientError) as err:
        module.fail_json_aws(err, msg="Failed to update Trail")
def main():
    """Entry point: create, update or delete a CloudTrail, keeping its
    logging state and resource tags in sync with the requested config."""
    argument_spec = dict(
        state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
        name=dict(default='default'),
        enable_logging=dict(default=True, type='bool'),
        s3_bucket_name=dict(),
        s3_key_prefix=dict(),
        sns_topic_name=dict(),
        is_multi_region_trail=dict(default=False, type='bool'),
        enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']),
        include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']),
        cloudwatch_logs_role_arn=dict(),
        cloudwatch_logs_log_group_arn=dict(),
        kms_key_id=dict(),
        tags=dict(default={}, type='dict'),
    )
    required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
    required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if)

    # Collect parameters -- 'enabled'/'disabled' are legacy aliases of present/absent.
    if module.params['state'] in ('present', 'enabled'):
        state = 'present'
    elif module.params['state'] in ('absent', 'disabled'):
        state = 'absent'
    tags = module.params['tags']
    enable_logging = module.params['enable_logging']
    ct_params = dict(
        Name=module.params['name'],
        S3BucketName=module.params['s3_bucket_name'],
        IncludeGlobalServiceEvents=module.params['include_global_events'],
        IsMultiRegionTrail=module.params['is_multi_region_trail'],
    )
    if module.params['s3_key_prefix']:
        # A trailing slash would produce double separators in delivered key names.
        ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')
    if module.params['sns_topic_name']:
        ct_params['SnsTopicName'] = module.params['sns_topic_name']
    if module.params['cloudwatch_logs_role_arn']:
        ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']
    if module.params['cloudwatch_logs_log_group_arn']:
        ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']
    if module.params['enable_log_file_validation'] is not None:
        ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation']
    if module.params['kms_key_id']:
        ct_params['KmsKeyId'] = module.params['kms_key_id']

    client = module.client('cloudtrail')
    region = module.region
    results = dict(
        changed=False,
        exists=False
    )

    # Get existing trail facts; None means the trail does not exist yet.
    trail = get_trail_facts(module, client, ct_params['Name'])
    if trail is not None:
        results['exists'] = True

    if state == 'absent' and results['exists']:
        # Trail exists -- delete it.
        results['changed'] = True
        results['exists'] = False
        results['trail'] = dict()
        if not module.check_mode:
            delete_trail(module, client, trail['TrailARN'])
    elif state == 'present' and results['exists']:
        # Trail exists -- compare each requested parameter against current facts.
        do_update = False
        for key in ct_params:
            tkey = str(key)
            # boto3 has inconsistent parameter naming so we handle it here:
            # the request key differs from the describe-response key.
            if key == 'EnableLogFileValidation':
                tkey = 'LogFileValidationEnabled'
            # We need to make an empty string equal None.
            if ct_params.get(key) == '':
                val = None
            else:
                val = ct_params.get(key)
            if val != trail.get(tkey):
                do_update = True
                results['changed'] = True
                # In check mode copy the changed values into the facts so the
                # result output shows what would change.
                if module.check_mode:
                    trail.update({tkey: ct_params.get(key)})
        if not module.check_mode and do_update:
            update_trail(module, client, ct_params)
            trail = get_trail_facts(module, client, ct_params['Name'])

        # Check if we need to start/stop logging.
        if enable_logging and not trail['IsLogging']:
            results['changed'] = True
            trail['IsLogging'] = True
            if not module.check_mode:
                set_logging(module, client, name=ct_params['Name'], action='start')
        if not enable_logging and trail['IsLogging']:
            results['changed'] = True
            trail['IsLogging'] = False
            if not module.check_mode:
                set_logging(module, client, name=ct_params['Name'], action='stop')

        # Check if we need to update tags on the resource.
        tag_dry_run = False
        if module.check_mode:
            tag_dry_run = True
        tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], dry_run=tag_dry_run)
        if tags_changed:
            results['changed'] = True
            trail['tags'] = tags
        # Populate trail facts in output.
        results['trail'] = camel_dict_to_snake_dict(trail)
    elif state == 'present' and not results['exists']:
        # Trail doesn't exist -- create it.
        results['changed'] = True
        if not module.check_mode:
            created_trail = create_trail(module, client, ct_params)
            # Apply tags.
            tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
            # Get the trail status.
            try:
                status_resp = client.get_trail_status(Name=created_trail['Name'])
            except (BotoCoreError, ClientError) as err:
                # Fixed typo in this message (was "statuc").
                module.fail_json_aws(err, msg="Failed to fetch Trail status")
            # Set the logging state for the trail to the desired value.
            if enable_logging and not status_resp['IsLogging']:
                set_logging(module, client, name=ct_params['Name'], action='start')
            if not enable_logging and status_resp['IsLogging']:
                set_logging(module, client, name=ct_params['Name'], action='stop')
            # Get facts for the newly created Trail.
            trail = get_trail_facts(module, client, ct_params['Name'])
        # In check mode create a fake return structure for the would-be trail.
        if module.check_mode:
            acct_id = '123456789012'
            try:
                sts_client = module.client('sts')
                acct_id = sts_client.get_caller_identity()['Account']
            except (BotoCoreError, ClientError):
                pass
            trail = dict()
            trail.update(ct_params)
            if 'EnableLogFileValidation' not in ct_params:
                ct_params['EnableLogFileValidation'] = False
            # Use the describe-response key name so the fake facts match what
            # describe_trails would return. The old code assigned the request
            # key 'EnableLogFileValidation' and then immediately popped it,
            # dropping the validation flag from the check-mode output.
            trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
            trail.pop('EnableLogFileValidation', None)
            fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
            trail['HasCustomEventSelectors'] = False
            trail['HomeRegion'] = region
            trail['TrailARN'] = fake_arn
            trail['IsLogging'] = enable_logging
            trail['tags'] = tags
        # Populate trail facts in output.
        results['trail'] = camel_dict_to_snake_dict(trail)

    module.exit_json(**results)


if __name__ == '__main__':
    main()

@ -1,464 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cloudwatchevent_rule
short_description: Manage CloudWatch Event rules and targets
description:
- This module creates and manages CloudWatch event rules and targets.
version_added: "2.2"
extends_documentation_fragment:
- aws
- ec2
author: "Jim Dalton (@jsdalton) <jim.dalton@gmail.com>"
requirements:
- python >= 2.6
- boto3
notes:
- A rule must contain at least an I(event_pattern) or I(schedule_expression). A
rule can have both an I(event_pattern) and a I(schedule_expression), in which
case the rule will trigger on matching events as well as on a schedule.
- When specifying targets, I(input) and I(input_path) are mutually-exclusive
and optional parameters.
options:
name:
description:
- The name of the rule you are creating, updating or deleting. No spaces
or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+)).
required: true
type: str
schedule_expression:
description:
- A cron or rate expression that defines the schedule the rule will
trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes)).
required: false
type: str
event_pattern:
description:
- A string pattern (in valid JSON format) that is used to match against
incoming events to determine if the rule should be triggered.
required: false
type: str
state:
description:
- Whether the rule is present (and enabled), disabled, or absent.
choices: ["present", "disabled", "absent"]
default: present
required: false
type: str
description:
description:
- A description of the rule.
required: false
type: str
role_arn:
description:
- The Amazon Resource Name (ARN) of the IAM role associated with the rule.
required: false
type: str
targets:
type: list
elements: dict
description:
- A list of targets to add to or update for the rule.
suboptions:
id:
type: str
required: true
description: The unique target assignment ID.
arn:
type: str
required: true
description: The ARN associated with the target.
role_arn:
type: str
description: The ARN of the IAM role to be used for this target when the rule is triggered.
input:
type: str
description:
- A JSON object that will override the event data when passed to the target.
- If neither I(input) nor I(input_path) is specified, then the entire
event is passed to the target in JSON form.
input_path:
type: str
description:
- A JSONPath string (e.g. C($.detail)) that specifies the part of the event data to be
passed to the target.
- If neither I(input) nor I(input_path) is specified, then the entire
event is passed to the target in JSON form.
ecs_parameters:
type: dict
description:
- Contains the ECS task definition and task count to be used, if the event target is an ECS task.
suboptions:
task_definition_arn:
type: str
description: The full ARN of the task definition.
task_count:
type: int
            description: The number of tasks to create based on I(task_definition_arn).
required: false
'''
EXAMPLES = '''
- cloudwatchevent_rule:
name: MyCronTask
schedule_expression: "cron(0 20 * * ? *)"
description: Run my scheduled task
targets:
- id: MyTargetId
arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
- cloudwatchevent_rule:
name: MyDisabledCronTask
schedule_expression: "rate(5 minutes)"
description: Run my disabled scheduled task
state: disabled
targets:
- id: MyOtherTargetId
arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
input: '{"foo": "bar"}'
- cloudwatchevent_rule:
name: MyCronTask
state: absent
'''
RETURN = '''
rule:
description: CloudWatch Event rule data.
returned: success
type: dict
sample:
arn: 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask'
description: 'Run my scheduled task'
name: 'MyCronTask'
schedule_expression: 'cron(0 20 * * ? *)'
state: 'ENABLED'
targets:
description: CloudWatch Event target(s) assigned to the rule.
returned: success
type: list
sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
'''
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
class CloudWatchEventRule(object):
    """Thin wrapper around the CloudWatch Events API for a single rule.

    Tracks whether any mutating call succeeded via the ``changed`` flag.
    """

    def __init__(self, module, name, client, schedule_expression=None,
                 event_pattern=None, description=None, role_arn=None):
        self.name = name
        self.client = client
        self.changed = False
        self.schedule_expression = schedule_expression
        self.event_pattern = event_pattern
        self.description = description
        self.role_arn = role_arn
        self.module = module

    def describe(self):
        """Returns the existing details of the rule in AWS, snake_cased; {} if absent."""
        try:
            rule_info = self.client.describe_rule(Name=self.name)
        except botocore.exceptions.ClientError as e:
            error_code = e.response.get('Error', {}).get('Code')
            if error_code == 'ResourceNotFoundException':
                # A missing rule is not an error for callers; signal with {}.
                return {}
            self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name)
        except botocore.exceptions.BotoCoreError as e:
            self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name)
        return self._snakify(rule_info)

    def put(self, enabled=True):
        """Creates or updates the rule in AWS; only set fields are sent."""
        request = {
            'Name': self.name,
            'State': "ENABLED" if enabled else "DISABLED",
        }
        if self.schedule_expression:
            request['ScheduleExpression'] = self.schedule_expression
        if self.event_pattern:
            request['EventPattern'] = self.event_pattern
        if self.description:
            request['Description'] = self.description
        if self.role_arn:
            request['RoleArn'] = self.role_arn
        try:
            response = self.client.put_rule(**request)
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            self.module.fail_json_aws(e, msg="Could not create/update rule %s" % self.name)
        self.changed = True
        return response

    def delete(self):
        """Deletes the rule in AWS (targets must be removed first)."""
        self.remove_all_targets()
        try:
            response = self.client.delete_rule(Name=self.name)
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            self.module.fail_json_aws(e, msg="Could not delete rule %s" % self.name)
        self.changed = True
        return response

    def enable(self):
        """Enables the rule in AWS."""
        try:
            response = self.client.enable_rule(Name=self.name)
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            self.module.fail_json_aws(e, msg="Could not enable rule %s" % self.name)
        self.changed = True
        return response

    def disable(self):
        """Disables the rule in AWS."""
        try:
            response = self.client.disable_rule(Name=self.name)
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            self.module.fail_json_aws(e, msg="Could not disable rule %s" % self.name)
        self.changed = True
        return response

    def list_targets(self):
        """Lists the existing targets for the rule in AWS; [] if the rule is absent."""
        try:
            targets = self.client.list_targets_by_rule(Rule=self.name)
        except botocore.exceptions.ClientError as e:
            error_code = e.response.get('Error', {}).get('Code')
            if error_code == 'ResourceNotFoundException':
                return []
            self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name)
        except botocore.exceptions.BotoCoreError as e:
            self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name)
        return self._snakify(targets)['targets']

    def put_targets(self, targets):
        """Creates or updates the provided targets on the rule in AWS."""
        if not targets:
            return
        request = {
            'Rule': self.name,
            'Targets': self._targets_request(targets),
        }
        try:
            response = self.client.put_targets(**request)
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            self.module.fail_json_aws(e, msg="Could not create/update rule targets for rule %s" % self.name)
        self.changed = True
        return response

    def remove_targets(self, target_ids):
        """Removes the provided target IDs from the rule in AWS."""
        if not target_ids:
            return
        request = {
            'Rule': self.name,
            'Ids': target_ids
        }
        try:
            response = self.client.remove_targets(**request)
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            self.module.fail_json_aws(e, msg="Could not remove rule targets from rule %s" % self.name)
        self.changed = True
        return response

    def remove_all_targets(self):
        """Removes all targets on the rule."""
        targets = self.list_targets()
        return self.remove_targets([t['id'] for t in targets])

    def _targets_request(self, targets):
        """Maps snake_cased target dicts to the CamelCase shape put_targets expects."""
        targets_request = []
        for target in targets:
            target_request = {
                'Id': target['id'],
                'Arn': target['arn']
            }
            if 'input' in target:
                target_request['Input'] = target['input']
            if 'input_path' in target:
                target_request['InputPath'] = target['input_path']
            if 'role_arn' in target:
                target_request['RoleArn'] = target['role_arn']
            if 'ecs_parameters' in target:
                target_request['EcsParameters'] = {}
                ecs_parameters = target['ecs_parameters']
                if 'task_definition_arn' in target['ecs_parameters']:
                    target_request['EcsParameters']['TaskDefinitionArn'] = ecs_parameters['task_definition_arn']
                if 'task_count' in target['ecs_parameters']:
                    target_request['EcsParameters']['TaskCount'] = ecs_parameters['task_count']
            targets_request.append(target_request)
        return targets_request

    def _snakify(self, camel_dict):
        """Converts camel case keys to snake case.

        Renamed the parameter from ``dict`` -- it shadowed the builtin.
        """
        return camel_dict_to_snake_dict(camel_dict)
class CloudWatchEventRuleManager(object):
    """Reconciles a desired rule-plus-targets configuration against AWS."""

    RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn']

    def __init__(self, rule, targets):
        self.rule = rule
        self.targets = targets

    def ensure_present(self, enabled=True):
        """Ensure the rule and its targets exist and match the desired state."""
        if self.rule.describe():
            # Rule already exists: bring rule, targets and enabled state in line.
            self._sync_rule(enabled)
            self._sync_targets()
            self._sync_state(enabled)
        else:
            # Rule does not exist: create it from scratch with its targets.
            self._create(enabled)

    def ensure_disabled(self):
        """Ensure the rule and targets are present, but disabled, and synced."""
        self.ensure_present(enabled=False)

    def ensure_absent(self):
        """Ensure the rule and its targets are removed from AWS."""
        if not self.rule.describe():
            # Nothing to delete.
            return
        self.rule.delete()

    def fetch_aws_state(self):
        """Return the rule/target state currently recorded in AWS."""
        aws_state = {
            'rule': {},
            'targets': [],
            'changed': self.rule.changed
        }
        description = self.rule.describe()
        if not description:
            return aws_state
        # Don't need to include response metadata noise in response.
        del description['response_metadata']
        aws_state['rule'] = description
        aws_state['targets'].extend(self.rule.list_targets())
        return aws_state

    def _sync_rule(self, enabled=True):
        """Push the local rule definition to AWS when it differs."""
        if not self._rule_matches_aws():
            self.rule.put(enabled)

    def _sync_targets(self):
        """Remove stale remote targets, then add/update the desired ones."""
        stale_ids = self._remote_target_ids_to_remove()
        if stale_ids:
            self.rule.remove_targets(stale_ids)
        pending = self._targets_to_put()
        if pending:
            self.rule.put_targets(pending)

    def _sync_state(self, enabled=True):
        """Enable or disable the remote rule to match the requested state."""
        current = self._remote_state()
        if enabled:
            if current != 'ENABLED':
                self.rule.enable()
        elif current != 'DISABLED':
            self.rule.disable()

    def _create(self, enabled=True):
        """Create the rule and attach all targets on AWS."""
        self.rule.put(enabled)
        self.rule.put_targets(self.targets)

    def _rule_matches_aws(self):
        """True when every tracked rule field agrees with what AWS reports."""
        aws_rule = self.rule.describe()
        return all(getattr(self.rule, field) == aws_rule.get(field, None)
                   for field in self.RULE_FIELDS)

    def _targets_to_put(self):
        """Targets that must be added or updated remotely."""
        existing = self.rule.list_targets()
        return [target for target in self.targets if target not in existing]

    def _remote_target_ids_to_remove(self):
        """IDs of remote targets that are no longer desired."""
        wanted_ids = [target['id'] for target in self.targets]
        return [remote['id'] for remote in self.rule.list_targets()
                if remote['id'] not in wanted_ids]

    def _remote_state(self):
        """The rule's state string in AWS, or None when the rule is absent."""
        description = self.rule.describe()
        if description:
            return description['state']
def main():
    """Module entry point: dispatch on the requested state to the rule manager."""
    argument_spec = dict(
        name=dict(required=True),
        schedule_expression=dict(),
        event_pattern=dict(),
        state=dict(choices=['present', 'disabled', 'absent'],
                   default='present'),
        description=dict(),
        role_arn=dict(),
        targets=dict(type='list', default=[]),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec)

    # Collect the rule-definition fields straight from the task parameters.
    rule_data = dict((field, module.params.get(field))
                     for field in CloudWatchEventRuleManager.RULE_FIELDS)
    targets = module.params.get('targets')
    state = module.params.get('state')
    client = module.client('events')

    manager = CloudWatchEventRuleManager(
        CloudWatchEventRule(module, client=client, **rule_data),
        targets,
    )

    if state == 'present':
        manager.ensure_present()
    elif state == 'disabled':
        manager.ensure_disabled()
    elif state == 'absent':
        manager.ensure_absent()
    else:
        # Unreachable given argument_spec choices, kept as a safety net.
        module.fail_json(msg="Invalid state '{0}' provided".format(state))

    module.exit_json(**manager.fetch_aws_state())


if __name__ == '__main__':
    main()

@ -1,319 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
# Option docs below: 'state' previously read "Whether the rule is present or
# absent" — copy-paste from the event-rule module; this module manages log groups.
DOCUMENTATION = '''
---
module: cloudwatchlogs_log_group
short_description: create or delete log_group in CloudWatchLogs
notes:
    - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/logs.html).
description:
    - Create or delete log_group in CloudWatchLogs.
version_added: "2.5"
author:
    - Willian Ricardo (@willricardo) <willricardo@gmail.com>
requirements: [ json, botocore, boto3 ]
options:
    state:
      description:
        - Whether the log group is present or absent.
      choices: ["present", "absent"]
      default: present
      required: false
      type: str
    log_group_name:
      description:
        - The name of the log group.
      required: true
      type: str
    kms_key_id:
      description:
        - The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
      required: false
      type: str
    tags:
      description:
        - The key-value pairs to use for the tags.
      required: false
      type: dict
    retention:
      description:
        - The number of days to retain the log events in the specified log group.
        - "Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]"
        - Mutually exclusive with I(purge_retention_policy).
      required: false
      type: int
    purge_retention_policy:
      description:
        - "Whether to purge the retention policy or not."
        - "Mutually exclusive with I(retention) and I(overwrite)."
      default: false
      required: false
      type: bool
      version_added: "2.10"
    overwrite:
      description:
        - Whether an existing log group should be overwritten on create.
        - Mutually exclusive with I(purge_retention_policy).
      default: false
      required: false
      type: bool
extends_documentation_fragment:
    - aws
    - ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- cloudwatchlogs_log_group:
log_group_name: test-log-group
- cloudwatchlogs_log_group:
state: present
log_group_name: test-log-group
tags: { "Name": "test-log-group", "Env" : "QA" }
- cloudwatchlogs_log_group:
state: present
log_group_name: test-log-group
tags: { "Name": "test-log-group", "Env" : "QA" }
kms_key_id: arn:aws:kms:region:account-id:key/key-id
- cloudwatchlogs_log_group:
state: absent
log_group_name: test-log-group
'''
RETURN = '''
log_groups:
description: Return the list of complex objects representing log groups
returned: success
type: complex
contains:
log_group_name:
description: The name of the log group.
returned: always
type: str
creation_time:
description: The creation time of the log group.
returned: always
type: int
retention_in_days:
description: The number of days to retain the log events in the specified log group.
returned: always
type: int
metric_filter_count:
description: The number of metric filters.
returned: always
type: int
arn:
description: The Amazon Resource Name (ARN) of the log group.
returned: always
type: str
stored_bytes:
description: The number of bytes stored.
returned: always
type: str
kms_key_id:
description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
returned: always
type: str
'''
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
def create_log_group(client, log_group_name, kms_key_id, tags, retention, module):
    """Create a CloudWatch log group and return its describe() entry.

    Optionally applies a KMS key, tags and a retention policy.  Fails the
    module on any AWS error, or if the new group cannot be found afterwards.
    """
    request = {'logGroupName': log_group_name}
    if kms_key_id:
        request['kmsKeyId'] = kms_key_id
    if tags:
        request['tags'] = tags
    try:
        client.create_log_group(**request)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
                         exception=traceback.format_exc())
    if retention:
        input_retention_policy(client=client, log_group_name=log_group_name,
                               retention=retention, module=module)
    # Re-read the group so the caller gets the fully populated description.
    found = describe_log_group(client=client, log_group_name=log_group_name,
                               module=module)
    for group in found.get('logGroups', []):
        if group['logGroupName'] == log_group_name:
            return group
    module.fail_json(msg="The aws CloudWatchLogs log group was not created. \n please try again!")
def input_retention_policy(client, log_group_name, retention, module):
    """Apply a retention policy (in days) to the named log group.

    The retention value is validated before any AWS call and the module
    fails on an invalid value.  The previous implementation deleted the log
    group when the value was invalid, which could destroy an EXISTING log
    group when called from the update path in main(); validation now happens
    up front with no destructive side effect.
    """
    permitted_values = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
    if retention not in permitted_values:
        module.fail_json(msg="Invalid retention value. Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]")
        # fail_json exits the module; return defensively for test harnesses.
        return
    try:
        client.put_retention_policy(logGroupName=log_group_name,
                                    retentionInDays=retention)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc())
def delete_retention_policy(client, log_group_name, module):
    """Remove any retention policy from the named log group."""
    try:
        client.delete_retention_policy(logGroupName=log_group_name)
    except botocore.exceptions.ClientError as e:
        message = "Unable to delete retention policy for log group {0}: {1}".format(log_group_name, to_native(e))
        module.fail_json(msg=message, exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        message = "Unable to delete retention policy for log group {0}: {1}".format(log_group_name, to_native(e))
        module.fail_json(msg=message, exception=traceback.format_exc())
def delete_log_group(client, log_group_name, module):
    """Delete the named log group if it currently exists; no-op otherwise."""
    description = describe_log_group(client=client,
                                     log_group_name=log_group_name,
                                     module=module)
    # describe uses a name *prefix*, so confirm an exact match before deleting.
    matches = [group for group in description.get('logGroups', [])
               if group['logGroupName'] == log_group_name]
    try:
        if matches:
            client.delete_log_group(logGroupName=log_group_name)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc())
def describe_log_group(client, log_group_name, module):
    """Return the raw describe_log_groups response for the given name prefix."""
    try:
        return client.describe_log_groups(logGroupNamePrefix=log_group_name)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc())
def main():
    """Ansible entry point: create, update, or delete a CloudWatch log group."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        log_group_name=dict(required=True, type='str'),
        state=dict(choices=['present', 'absent'],
                   default='present'),
        kms_key_id=dict(required=False, type='str'),
        tags=dict(required=False, type='dict'),
        retention=dict(required=False, type='int'),
        purge_retention_policy=dict(required=False, type='bool', default=False),
        overwrite=dict(required=False, type='bool', default=False)
    ))
    mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']]
    module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    state = module.params.get('state')
    changed = False
    # Determine if the log group exists
    # (describe matches by prefix, so confirm the exact name below)
    desc_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
    found_log_group = {}
    for i in desc_log_group.get('logGroups', []):
        if module.params['log_group_name'] == i['logGroupName']:
            found_log_group = i
            break
    if state == 'present':
        if found_log_group:
            if module.params['overwrite'] is True:
                # Recreate from scratch: delete, then create with new settings.
                changed = True
                delete_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
                found_log_group = create_log_group(client=logs,
                                                   log_group_name=module.params['log_group_name'],
                                                   kms_key_id=module.params['kms_key_id'],
                                                   tags=module.params['tags'],
                                                   retention=module.params['retention'],
                                                   module=module)
            elif module.params['purge_retention_policy']:
                # Only remove the retention policy when one is actually set.
                if found_log_group.get('retentionInDays'):
                    changed = True
                    delete_retention_policy(client=logs,
                                            log_group_name=module.params['log_group_name'],
                                            module=module)
            elif module.params['retention'] != found_log_group.get('retentionInDays'):
                # Update the retention policy only when a value was supplied.
                if module.params['retention'] is not None:
                    changed = True
                    input_retention_policy(client=logs,
                                           log_group_name=module.params['log_group_name'],
                                           retention=module.params['retention'],
                                           module=module)
                    found_log_group['retentionInDays'] = module.params['retention']
        elif not found_log_group:
            changed = True
            found_log_group = create_log_group(client=logs,
                                               log_group_name=module.params['log_group_name'],
                                               kms_key_id=module.params['kms_key_id'],
                                               tags=module.params['tags'],
                                               retention=module.params['retention'],
                                               module=module)
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(found_log_group))
    elif state == 'absent':
        if found_log_group:
            changed = True
            delete_log_group(client=logs,
                             log_group_name=module.params['log_group_name'],
                             module=module)
    module.exit_json(changed=changed)
if __name__ == '__main__':
    main()

@ -1,132 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudwatchlogs_log_group_info
short_description: Get information about log_group in CloudWatchLogs
description:
- Lists the specified log groups. You can list all your log groups or filter the results by prefix.
- This module was called C(cloudwatchlogs_log_group_facts) before Ansible 2.9. The usage did not change.
version_added: "2.5"
author:
- Willian Ricardo (@willricardo) <willricardo@gmail.com>
requirements: [ botocore, boto3 ]
options:
log_group_name:
description:
- The name or prefix of the log group to filter by.
type: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- cloudwatchlogs_log_group_info:
log_group_name: test-log-group
'''
RETURN = '''
log_groups:
description: Return the list of complex objects representing log groups
returned: success
type: complex
contains:
log_group_name:
description: The name of the log group.
returned: always
type: str
creation_time:
description: The creation time of the log group.
returned: always
type: int
retention_in_days:
description: The number of days to retain the log events in the specified log group.
returned: always
type: int
metric_filter_count:
description: The number of metric filters.
returned: always
type: int
arn:
description: The Amazon Resource Name (ARN) of the log group.
returned: always
type: str
stored_bytes:
description: The number of bytes stored.
returned: always
type: str
kms_key_id:
description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
returned: always
type: str
'''
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
def describe_log_group(client, log_group_name, module):
    """Return all log groups, optionally filtered by name prefix, fully paginated."""
    params = {}
    if log_group_name:
        params['logGroupNamePrefix'] = log_group_name
    try:
        # Paginate so results beyond the first page are included.
        paginator = client.get_paginator('describe_log_groups')
        return paginator.paginate(**params).build_full_result()
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc())
def main():
    """Ansible entry point: list CloudWatch log groups and return them snake_cased."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        log_group_name=dict(),
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    # Keep the deprecated *_facts alias working but warn about the rename.
    if module._name == 'cloudwatchlogs_log_group_facts':
        module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'", version='2.13')
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    desc_log_group = describe_log_group(client=logs,
                                        log_group_name=module.params['log_group_name'],
                                        module=module)
    final_log_group_snake = []
    # Convert boto3's camelCase keys into Ansible's snake_case convention.
    for log_group in desc_log_group['logGroups']:
        final_log_group_snake.append(camel_dict_to_snake_dict(log_group))
    desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)
    module.exit_json(**desc_log_group_result)
if __name__ == '__main__':
    main()

@ -1,221 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudwatchlogs_log_group_metric_filter
version_added: "2.10"
author:
- "Markus Bergholz (@markuman)"
short_description: Manage CloudWatch log group metric filter
description:
- Create, modify and delete CloudWatch log group metric filter.
- CloudWatch log group metric filter can be use with M(ec2_metric_alarm).
requirements:
- boto3
- botocore
options:
state:
description:
- Whether the rule is present or absent.
choices: ["present", "absent"]
required: true
type: str
log_group_name:
description:
- The name of the log group where the metric filter is applied on.
required: true
type: str
filter_name:
description:
- A name for the metric filter you create.
required: true
type: str
filter_pattern:
description:
- A filter pattern for extracting metric data out of ingested log events. Required when I(state=present).
type: str
metric_transformation:
description:
- A collection of information that defines how metric data gets emitted. Required when I(state=present).
type: dict
suboptions:
metric_name:
description:
- The name of the cloudWatch metric.
type: str
metric_namespace:
description:
- The namespace of the cloudWatch metric.
type: str
metric_value:
description:
- The value to publish to the cloudWatch metric when a filter pattern matches a log event.
type: str
default_value:
description:
- The value to emit when a filter pattern does not match a log event.
type: float
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: set metric filter on log group /fluentd/testcase
cloudwatchlogs_log_group_metric_filter:
log_group_name: /fluentd/testcase
filter_name: BoxFreeStorage
filter_pattern: '{($.value = *) && ($.hostname = "box")}'
state: present
metric_transformation:
metric_name: box_free_space
metric_namespace: fluentd_metrics
metric_value: "$.value"
- name: delete metric filter on log group /fluentd/testcase
cloudwatchlogs_log_group_metric_filter:
log_group_name: /fluentd/testcase
filter_name: BoxFreeStorage
state: absent
'''
RETURN = """
metric_filters:
description: Return the origin response value
returned: success
type: list
contains:
creation_time:
filter_name:
filter_pattern:
log_group_name:
metric_filter_count:
"""
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
try:
from botocore.exceptions import ClientError, BotoCoreError, WaiterError
except ImportError:
pass # caught by AnsibleAWSModule
def metricTransformationHandler(metricTransformations, originMetricTransformations=None):
    """Build the boto3 metricTransformations payload and detect drift.

    Returns a tuple (payload, changed): payload is the single-element list
    boto3 expects; changed is True when no remote transformation exists or
    any tracked field differs from the remote value.
    """
    if originMetricTransformations:
        existing = camel_dict_to_snake_dict(originMetricTransformations)
        tracked = ("default_value", "metric_name", "metric_namespace", "metric_value")
        change = any(
            metricTransformations.get(field) != existing.get(field)
            for field in tracked
        )
    else:
        # Nothing exists remotely yet, so the filter must be created.
        change = True
    transformation = {
        'metricName': metricTransformations.get("metric_name"),
        'metricNamespace': metricTransformations.get("metric_namespace"),
        'metricValue': metricTransformations.get("metric_value"),
    }
    default = metricTransformations.get("default_value")
    # defaultValue is only sent when a numeric default was supplied.
    if isinstance(default, (int, float)):
        transformation['defaultValue'] = default
    return [transformation], change
def main():
    """Ansible entry point: create, update, or delete a log group metric filter."""
    arg_spec = dict(
        state=dict(type='str', required=True, choices=['present', 'absent']),
        log_group_name=dict(type='str', required=True),
        filter_name=dict(type='str', required=True),
        filter_pattern=dict(type='str'),
        metric_transformation=dict(type='dict', options=dict(
            metric_name=dict(type='str'),
            metric_namespace=dict(type='str'),
            metric_value=dict(type='str'),
            default_value=dict(type='float')
        )),
    )
    module = AnsibleAWSModule(
        argument_spec=arg_spec,
        supports_check_mode=True,
        required_if=[('state', 'present', ['metric_transformation', 'filter_pattern'])]
    )
    log_group_name = module.params.get("log_group_name")
    filter_name = module.params.get("filter_name")
    filter_pattern = module.params.get("filter_pattern")
    metric_transformation = module.params.get("metric_transformation")
    state = module.params.get("state")
    cwl = module.client('logs')
    # check if metric filter exists
    response = cwl.describe_metric_filters(
        logGroupName=log_group_name,
        filterNamePrefix=filter_name
    )
    if len(response.get("metricFilters")) == 1:
        # Exactly one match: capture the remote definition for drift detection.
        originMetricTransformations = response.get(
            "metricFilters")[0].get("metricTransformations")[0]
        originFilterPattern = response.get("metricFilters")[
            0].get("filterPattern")
    else:
        originMetricTransformations = None
        originFilterPattern = None
    change = False
    metricTransformation = None
    if state == "absent" and originMetricTransformations:
        if not module.check_mode:
            response = cwl.delete_metric_filter(
                logGroupName=log_group_name,
                filterName=filter_name
            )
        change = True
        metricTransformation = [camel_dict_to_snake_dict(item) for item in [originMetricTransformations]]
    elif state == "present":
        # Compute the desired payload and whether AWS needs updating.
        metricTransformation, change = metricTransformationHandler(
            metricTransformations=metric_transformation, originMetricTransformations=originMetricTransformations)
        change = change or filter_pattern != originFilterPattern
        if change:
            if not module.check_mode:
                response = cwl.put_metric_filter(
                    logGroupName=log_group_name,
                    filterName=filter_name,
                    filterPattern=filter_pattern,
                    metricTransformations=metricTransformation
                )
        metricTransformation = [camel_dict_to_snake_dict(item) for item in metricTransformation]
    module.exit_json(changed=change, metric_filters=metricTransformation)
if __name__ == '__main__':
    main()

@ -1,652 +0,0 @@
#!/usr/bin/python
#
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: data_pipeline
version_added: "2.4"
author:
- Raghu Udiyar (@raags) <raghusiddarth@gmail.com>
- Sloane Hertel (@s-hertel) <shertel@redhat.com>
requirements: [ "boto3" ]
short_description: Create and manage AWS Datapipelines
extends_documentation_fragment:
- aws
- ec2
description:
- Create and manage AWS Datapipelines. Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects)
given to the datapipeline.
- The pipeline definition must be in the format given here
U(https://docs.aws.amazon.com/datapipeline/latest/APIReference/API_PutPipelineDefinition.html#API_PutPipelineDefinition_RequestSyntax).
- Operations will wait for a configurable amount of time to ensure the pipeline is in the requested state.
options:
name:
description:
- The name of the Datapipeline to create/modify/delete.
required: true
type: str
description:
description:
- An optional description for the pipeline being created.
default: ''
type: str
objects:
type: list
elements: dict
description:
- A list of pipeline object definitions, each of which is a dict that takes the keys I(id), I(name) and I(fields).
suboptions:
id:
description:
- The ID of the object.
type: str
name:
description:
- The name of the object.
type: str
fields:
description:
- Key-value pairs that define the properties of the object.
- The value is specified as a reference to another object I(refValue) or as a string value I(stringValue)
but not as both.
type: list
elements: dict
suboptions:
key:
type: str
description:
- The field identifier.
stringValue:
type: str
description:
- The field value.
- Exactly one of I(stringValue) and I(refValue) may be specified.
refValue:
type: str
description:
- The field value, expressed as the identifier of another object.
- Exactly one of I(stringValue) and I(refValue) may be specified.
parameters:
description:
- A list of parameter objects (dicts) in the pipeline definition.
type: list
elements: dict
suboptions:
id:
description:
- The ID of the parameter object.
attributes:
description:
- A list of attributes (dicts) of the parameter object.
type: list
elements: dict
suboptions:
key:
description: The field identifier.
type: str
stringValue:
description: The field value.
type: str
values:
description:
- A list of parameter values (dicts) in the pipeline definition.
type: list
elements: dict
suboptions:
id:
description: The ID of the parameter value
type: str
stringValue:
description: The field value
type: str
timeout:
description:
- Time in seconds to wait for the pipeline to transition to the requested state, fail otherwise.
default: 300
type: int
state:
description:
- The requested state of the pipeline.
choices: ['present', 'absent', 'active', 'inactive']
default: present
type: str
tags:
description:
- A dict of key:value pair(s) to add to the pipeline.
type: dict
version:
description:
- The version option has never had any effect and will be removed in
Ansible 2.14
type: str
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create pipeline
- data_pipeline:
name: test-dp
region: us-west-2
objects: "{{pipelineObjects}}"
parameters: "{{pipelineParameters}}"
values: "{{pipelineValues}}"
tags:
key1: val1
key2: val2
state: present
# Example populating and activating a pipeline that demonstrates two ways of providing pipeline objects
- data_pipeline:
name: test-dp
objects:
- "id": "DefaultSchedule"
"name": "Every 1 day"
"fields":
- "key": "period"
"stringValue": "1 days"
- "key": "type"
"stringValue": "Schedule"
- "key": "startAt"
"stringValue": "FIRST_ACTIVATION_DATE_TIME"
- "id": "Default"
"name": "Default"
"fields": [ { "key": "resourceRole", "stringValue": "my_resource_role" },
{ "key": "role", "stringValue": "DataPipelineDefaultRole" },
{ "key": "pipelineLogUri", "stringValue": "s3://my_s3_log.txt" },
{ "key": "scheduleType", "stringValue": "cron" },
{ "key": "schedule", "refValue": "DefaultSchedule" },
{ "key": "failureAndRerunMode", "stringValue": "CASCADE" } ]
state: active
# Activate pipeline
- data_pipeline:
name: test-dp
region: us-west-2
state: active
# Delete pipeline
- data_pipeline:
name: test-dp
region: us-west-2
state: absent
'''
RETURN = '''
changed:
description: whether the data pipeline has been modified
type: bool
returned: always
sample:
changed: true
result:
description:
- Contains the data pipeline data (data_pipeline) and a return message (msg).
If the data pipeline exists data_pipeline will contain the keys description, name,
pipeline_id, state, tags, and unique_id. If the data pipeline does not exist then
data_pipeline will be an empty dict. The msg describes the status of the operation.
returned: always
type: dict
'''
import hashlib
import json
import time
import traceback
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict
from ansible.module_utils._text import to_text
DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED']
DP_INACTIVE_STATES = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING']
DP_ACTIVATING_STATE = 'ACTIVATING'
DP_DEACTIVATING_STATE = 'DEACTIVATING'
PIPELINE_DOESNT_EXIST = '^.*Pipeline with id: {0} does not exist$'
class DataPipelineNotFound(Exception):
    # Raised when no pipeline with the requested name/id can be found.
    pass
class TimeOutException(Exception):
    # Raised when a pipeline fails to reach the desired state within the timeout.
    pass
def pipeline_id(client, name):
    """Look up the id of the pipeline with the given name.

    :param object client: boto3 datapipeline client
    :param string name: pipeline name
    :returns: pipeline id
    :raises: DataPipelineNotFound
    """
    listing = client.list_pipelines()
    matches = [entry['id'] for entry in listing['pipelineIdList']
               if entry['name'] == name]
    if matches:
        return matches[0]
    raise DataPipelineNotFound
def pipeline_description(client, dp_id):
    """Fetch the describe_pipelines response for a single pipeline id.

    :param object client: boto3 datapipeline client
    :returns: pipeline description dictionary
    :raises: DataPipelineNotFound
    """
    try:
        return client.describe_pipelines(pipelineIds=[dp_id])
    except ClientError:
        # boto3 signals a missing pipeline via ClientError; normalise it.
        raise DataPipelineNotFound
def pipeline_field(client, dp_id, field):
    """Return one field's stringValue from the pipeline description.

    The available fields are listed in describe_pipelines output.
    :param object client: boto3 datapipeline client
    :param string dp_id: pipeline id
    :param string field: pipeline description field
    :returns: pipeline field information
    """
    description = pipeline_description(client, dp_id)
    for entry in description['pipelineDescriptionList'][0]['fields']:
        if entry['key'] == field:
            return entry['stringValue']
    raise KeyError("Field key {0} not found!".format(field))
def run_with_timeout(timeout, func, *func_args, **func_kwargs):
    """Repeatedly call func until it returns a truthy value or timeout expires.

    func is polled every 10 seconds.  It is always invoked at least once,
    even when timeout is smaller than the 10-second poll interval — the
    previous ``range(timeout // 10)`` skipped the check entirely for
    timeout < 10 and raised TimeOutException without ever calling func.

    :param int timeout: time in seconds to wait for a truthy result
    :param function func: function to run, should return True or False
    :param args func_args: positional args passed to func
    :param kwargs func_kwargs: keyword args passed to func
    :returns: True if func returns truthy within timeout
    :raises: TimeOutException
    """
    attempts = max(timeout // 10, 1)
    for _ in range(attempts):
        if func(*func_args, **func_kwargs):
            return True
        # check every 10s
        time.sleep(10)
    raise TimeOutException
def check_dp_exists(client, dp_id):
    """Report whether the datapipeline exists.

    :param object client: boto3 datapipeline client
    :param string dp_id: pipeline id
    :returns: True or False
    """
    try:
        # pipeline_description raises DataPipelineNotFound when absent.
        return bool(pipeline_description(client, dp_id))
    except DataPipelineNotFound:
        return False
def check_dp_status(client, dp_id, status):
    """Report whether the pipeline state is one of the given states.

    :param object client: boto3 datapipeline client
    :param string dp_id: pipeline id
    :param list status: list of states to check against
    :returns: True or False
    """
    if not isinstance(status, list):
        raise AssertionError()
    current_state = pipeline_field(client, dp_id, field="@pipelineState")
    return current_state in status
def pipeline_status_timeout(client, dp_id, status, timeout):
    # Poll until the pipeline reaches one of the given states or timeout expires.
    return run_with_timeout(timeout, check_dp_status, client, dp_id, status)
def pipeline_exists_timeout(client, dp_id, timeout):
    # Poll until the pipeline is visible in AWS or timeout expires.
    return run_with_timeout(timeout, check_dp_exists, client, dp_id)
def activate_pipeline(client, module):
    """Activates pipeline

    Returns (changed, result); changed is False when the pipeline was
    already in an active state.
    """
    dp_name = module.params.get('name')
    timeout = module.params.get('timeout')
    try:
        dp_id = pipeline_id(client, dp_name)
    except DataPipelineNotFound:
        module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
    if pipeline_field(client, dp_id, field="@pipelineState") in DP_ACTIVE_STATES:
        changed = False
    else:
        try:
            client.activate_pipeline(pipelineId=dp_id)
        except ClientError as e:
            # NOTE(review): only InvalidRequestException is reported; other
            # ClientError codes fall through silently — confirm intended.
            if e.response["Error"]["Code"] == "InvalidRequestException":
                module.fail_json(msg="You need to populate your pipeline before activation.")
        try:
            # Wait for the pipeline to settle into an active state.
            pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES,
                                    timeout=timeout)
        except TimeOutException:
            if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED":
                # activated but completed more rapidly than it was checked
                pass
            else:
                module.fail_json(msg=('Data Pipeline {0} failed to activate '
                                      'within timeout {1} seconds').format(dp_name, timeout))
        changed = True
    data_pipeline = get_result(client, dp_id)
    result = {'data_pipeline': data_pipeline,
              'msg': 'Data Pipeline {0} activated.'.format(dp_name)}
    return (changed, result)
def deactivate_pipeline(client, module):
    """Deactivates pipeline

    Returns (changed, result); changed is False when the pipeline was
    already in an inactive state.
    """
    dp_name = module.params.get('name')
    timeout = module.params.get('timeout')
    try:
        dp_id = pipeline_id(client, dp_name)
    except DataPipelineNotFound:
        module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
    if pipeline_field(client, dp_id, field="@pipelineState") in DP_INACTIVE_STATES:
        changed = False
    else:
        client.deactivate_pipeline(pipelineId=dp_id)
        try:
            pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES,
                                    timeout=timeout)
        except TimeOutException:
            # NOTE(review): message lacks a space between 'deactivate' and
            # 'within' — left untouched to avoid changing runtime output.
            module.fail_json(msg=('Data Pipeline {0} failed to deactivate'
                                  'within timeout {1} seconds').format(dp_name, timeout))
        changed = True
    data_pipeline = get_result(client, dp_id)
    result = {'data_pipeline': data_pipeline,
              'msg': 'Data Pipeline {0} deactivated.'.format(dp_name)}
    return (changed, result)
def _delete_dp_with_check(dp_id, client, timeout):
    """Delete the pipeline and wait for AWS to confirm it is gone."""
    client.delete_pipeline(pipelineId=dp_id)
    try:
        # The describe call inside polling raises DataPipelineNotFound once the
        # pipeline disappears, which is the success signal here.
        pipeline_status_timeout(client=client, dp_id=dp_id, status=[PIPELINE_DOESNT_EXIST], timeout=timeout)
    except DataPipelineNotFound:
        return True
def delete_pipeline(client, module):
    """Delete the named data pipeline, waiting until it no longer exists.

    :param object client: boto3 datapipeline client
    :param object module: AnsibleModule instance (reads ``name`` and ``timeout`` params)
    :returns: tuple of (changed, result dict for exit_json)
    """
    dp_name = module.params.get('name')
    timeout = module.params.get('timeout')
    try:
        dp_id = pipeline_id(client, dp_name)
        _delete_dp_with_check(dp_id, client, timeout)
        changed = True
    except DataPipelineNotFound:
        # Already gone - nothing to do.
        changed = False
    except TimeOutException:
        # Bug fix: the original implicit string concatenation produced
        # "...failed to deletewithin timeout..." (missing space).
        module.fail_json(msg=('Data Pipeline {0} failed to delete '
                              'within timeout {1} seconds').format(dp_name, timeout))
    result = {'data_pipeline': {},
              'msg': 'Data Pipeline {0} deleted'.format(dp_name)}
    return (changed, result)
def build_unique_id(module):
    """Derive a deterministic unique id for the pipeline from the module parameters.

    ``objects`` and ``timeout`` are excluded from the hash so that uploading a
    new definition or changing the wait timeout does not force creation of a
    brand-new pipeline.

    :param object module: AnsibleModule instance
    :returns: hex md5 digest of the sorted, JSON-serialized parameters
    """
    params = dict(module.params)
    for volatile_key in ('objects', 'timeout'):
        params.pop(volatile_key, None)
    serialized = json.dumps(params, sort_keys=True).encode("utf-8")
    return hashlib.md5(serialized).hexdigest()
def format_tags(tags):
    """Convert a plain ``{key: value}`` tag mapping into the AWS list-of-dicts form.

    :param dict tags: tag mapping (e.g. ``{"env": "prod"}``)
    :returns: list of ``{"key": ..., "value": ...}`` dicts
    """
    formatted = []
    for tag_key, tag_value in tags.items():
        formatted.append({'key': tag_key, 'value': tag_value})
    return formatted
def get_result(client, dp_id):
    """Describe the pipeline and reformat the description to snake_case for exit_json.

    :param object client: boto3 datapipeline client
    :param string dp_id: pipeline id
    :returns: dict describing the pipeline with snake_case keys
    """
    # describe returns a pipelineDescriptionList with exactly one entry for dp_id
    description = pipeline_description(client, dp_id)['pipelineDescriptionList'][0]
    # Surface uniqueId / @pipelineState, which otherwise live inside "fields".
    description["unique_id"] = pipeline_field(client, dp_id, field="uniqueId")
    description["pipeline_state"] = pipeline_field(client, dp_id, field="@pipelineState")
    # "fields" is a list (cannot be snake_cased) and its data is now redundant.
    del description["fields"]
    # Tags are already in an acceptable shape; everything else gets snake_cased.
    return camel_dict_to_snake_dict(description)
def diff_pipeline(client, module, objects, unique_id, dp_name):
    """Decide whether the pipeline must be created, updated, or left alone.

    :param object client: boto3 datapipeline client
    :param object module: AnsibleModule instance
    :param list objects: desired pipeline objects
    :param string unique_id: hash of the module parameters
    :param string dp_name: pipeline name
    :returns: tuple (create_dp, changed, result) where ``changed`` may be the
        sentinel string "NEW_VERSION" (resolved to a bool in create_pipeline())
    """
    result = {}
    changed = False
    create_dp = False
    # NOTE(review): the unique_id argument is immediately recomputed here, so
    # the passed-in value is never used -- confirm before relying on it.
    unique_id = build_unique_id(module)
    try:
        dp_id = pipeline_id(client, dp_name)
        existing_unique_id = to_text(pipeline_field(client, dp_id, field="uniqueId"))
        if existing_unique_id != unique_id:
            # Parameters differ: a change is expected but not yet determined.
            changed = "NEW_VERSION"
            create_dp = True
        else:
            # Same unique id - check whether the definition itself changed.
            current_objects = client.get_pipeline_definition(pipelineId=dp_id)['pipelineObjects']
            if current_objects != objects:
                changed, msg = define_pipeline(client, module, objects, dp_id)
            else:
                msg = 'Data Pipeline {0} is present'.format(dp_name)
            data_pipeline = get_result(client, dp_id)
            result = {'data_pipeline': data_pipeline,
                      'msg': msg}
    except DataPipelineNotFound:
        create_dp = True
    return create_dp, changed, result
def define_pipeline(client, module, objects, dp_id):
    """Put (upload) the pipeline definition onto an existing pipeline.

    :param object client: boto3 datapipeline client
    :param object module: AnsibleModule instance (reads ``name``, ``parameters``, ``values``)
    :param list objects: pipeline objects to upload; empty list is a no-op
    :param string dp_id: pipeline id
    :returns: tuple of (changed, message)
    """
    dp_name = module.params.get('name')
    if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED":
        # A FINISHED pipeline cannot accept a new definition.
        msg = 'Data Pipeline {0} is unable to be updated while in state FINISHED.'.format(dp_name)
        changed = False
    elif objects:
        parameters = module.params.get('parameters')
        values = module.params.get('values')
        try:
            client.put_pipeline_definition(pipelineId=dp_id,
                                           pipelineObjects=objects,
                                           parameterObjects=parameters,
                                           parameterValues=values)
            msg = 'Data Pipeline {0} has been updated.'.format(dp_name)
            changed = True
        except ClientError:
            # Bug fix: the original implicit string concatenation was missing
            # spaces ("fieldsare", "allowedobjects") in this error message.
            module.fail_json(msg="Failed to put the definition for pipeline {0}. Check that string/reference fields "
                                 "are not empty and that the number of objects in the pipeline does not exceed maximum allowed "
                                 "objects".format(dp_name), exception=traceback.format_exc())
    else:
        # Nothing to upload.
        changed = False
        msg = ""
    return changed, msg
def create_pipeline(client, module):
    """Create the data pipeline if needed and upload its definition.

    Uses a hash of the module parameters as the pipeline uniqueId, so re-running
    with identical parameters is idempotent, while changed parameters replace
    the old pipeline with a new version.

    :param object client: boto3 datapipeline client
    :param object module: AnsibleModule instance
    :returns: tuple of (changed, result dict for exit_json)
    """
    dp_name = module.params.get('name')
    objects = module.params.get('objects', None)
    description = module.params.get('description', '')
    tags = module.params.get('tags')
    timeout = module.params.get('timeout')
    unique_id = build_unique_id(module)
    create_dp, changed, result = diff_pipeline(client, module, objects, unique_id, dp_name)
    if changed == "NEW_VERSION":
        # Parameters changed: drop the old pipeline before creating its replacement.
        changed, creation_result = delete_pipeline(client, module)
    # There isn't a pipeline or it has different parameters than the one in existence.
    if create_dp:
        try:
            tags = format_tags(tags)
            dp = client.create_pipeline(name=dp_name,
                                        uniqueId=unique_id,
                                        description=description,
                                        tags=tags)
            dp_id = dp['pipelineId']
            pipeline_exists_timeout(client, dp_id, timeout)
        except ClientError:
            module.fail_json(msg="Failed to create the data pipeline {0}.".format(dp_name), exception=traceback.format_exc())
        except TimeOutException:
            # Bug fix: the original implicit string concatenation produced
            # "...failed to createwithin timeout..." (missing space).
            module.fail_json(msg=('Data Pipeline {0} failed to create '
                                  'within timeout {1} seconds').format(dp_name, timeout))
        # Upload the definition (no-op when 'objects' is empty).
        changed, msg = define_pipeline(client, module, objects, dp_id)
        changed = True
        data_pipeline = get_result(client, dp_id)
        result = {'data_pipeline': data_pipeline,
                  'msg': 'Data Pipeline {0} created.'.format(dp_name) + msg}
    return (changed, result)
def main():
    """Module entry point: dispatch on 'state' to create/delete/(de)activate a pipeline."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True),
            # Deprecated option kept for backward compatibility; scheduled for removal.
            version=dict(removed_in_version='2.14'),
            description=dict(required=False, default=''),
            objects=dict(required=False, type='list', default=[]),
            parameters=dict(required=False, type='list', default=[]),
            # seconds to wait for pipeline state transitions
            timeout=dict(required=False, type='int', default=300),
            state=dict(default='present', choices=['present', 'absent',
                       'active', 'inactive']),
            tags=dict(required=False, type='dict', default={}),
            values=dict(required=False, type='list', default=[])
        )
    )
    module = AnsibleModule(argument_spec, supports_check_mode=False)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for the datapipeline module!')
    try:
        # Resolve region/credentials from module params, environment, or boto config.
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        if not region:
            module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
        client = boto3_conn(module, conn_type='client',
                            resource='datapipeline', region=region,
                            endpoint=ec2_url, **aws_connect_kwargs)
    except ClientError as e:
        module.fail_json(msg="Can't authorize connection - " + str(e))
    # Each handler returns (changed, result) for exit_json.
    state = module.params.get('state')
    if state == 'present':
        changed, result = create_pipeline(client, module)
    elif state == 'absent':
        changed, result = delete_pipeline(client, module)
    elif state == 'active':
        changed, result = activate_pipeline(client, module)
    elif state == 'inactive':
        changed, result = deactivate_pipeline(client, module)
    module.exit_json(result=result, changed=changed)
if __name__ == '__main__':
    main()

@ -1,472 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dms_endpoint
short_description: Creates or destroys a data migration services endpoint
description:
- Creates or destroys a data migration services endpoint,
that can be used to replicate data.
version_added: "2.9"
options:
state:
description:
- State of the endpoint.
default: present
choices: ['present', 'absent']
type: str
endpointidentifier:
description:
- An identifier name for the endpoint.
type: str
required: true
endpointtype:
description:
- Type of endpoint we want to manage.
choices: ['source', 'target']
type: str
required: true
enginename:
description:
- Database engine that we want to use, please refer to
the AWS DMS for more information on the supported
engines and their limitations.
choices: ['mysql', 'oracle', 'postgres', 'mariadb', 'aurora',
'redshift', 's3', 'db2', 'azuredb', 'sybase',
'dynamodb', 'mongodb', 'sqlserver']
type: str
required: true
username:
description:
- Username our endpoint will use to connect to the database.
type: str
password:
description:
- Password used to connect to the database
this attribute can only be written
the AWS API does not return this parameter.
type: str
servername:
description:
- Servername that the endpoint will connect to.
type: str
port:
description:
- TCP port for access to the database.
type: int
databasename:
description:
- Name for the database on the origin or target side.
type: str
extraconnectionattributes:
description:
- Extra attributes for the database connection, the AWS documentation
states " For more information about extra connection attributes,
see the documentation section for your data store."
type: str
kmskeyid:
description:
- Encryption key to use to encrypt replication storage and
connection information.
type: str
tags:
description:
- A list of tags to add to the endpoint.
type: dict
certificatearn:
description:
- Amazon Resource Name (ARN) for the certificate.
type: str
sslmode:
description:
- Mode used for the SSL connection.
default: none
choices: ['none', 'require', 'verify-ca', 'verify-full']
type: str
serviceaccessrolearn:
description:
- Amazon Resource Name (ARN) for the service access role that you
want to use to create the endpoint.
type: str
externaltabledefinition:
description:
- The external table definition.
type: str
dynamodbsettings:
description:
- Settings in JSON format for the target Amazon DynamoDB endpoint
if source or target is dynamodb.
type: dict
s3settings:
description:
- S3 buckets settings for the target Amazon S3 endpoint.
type: dict
dmstransfersettings:
description:
- The settings in JSON format for the DMS transfer type of
source endpoint.
type: dict
mongodbsettings:
description:
- Settings in JSON format for the source MongoDB endpoint.
type: dict
kinesissettings:
description:
- Settings in JSON format for the target Amazon Kinesis
Data Streams endpoint.
type: dict
elasticsearchsettings:
description:
- Settings in JSON format for the target Elasticsearch endpoint.
type: dict
wait:
description:
- Whether Ansible should wait for the object to be deleted when I(state=absent).
type: bool
default: false
timeout:
description:
- Time in seconds we should wait for when deleting a resource.
- Required when I(wait=true).
type: int
retries:
description:
- number of times we should retry when deleting a resource
- Required when I(wait=true).
type: int
author:
- "Rui Moreira (@ruimoreira)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details
# Endpoint Creation
- dms_endpoint:
state: absent
endpointidentifier: 'testsource'
endpointtype: source
enginename: aurora
username: testing1
password: testint1234
servername: testing.domain.com
port: 3306
databasename: 'testdb'
sslmode: none
wait: false
'''
RETURN = ''' # '''
__metaclass__ = type
import traceback
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
backoff_params = dict(tries=5, delay=1, backoff=1.5)
@AWSRetry.backoff(**backoff_params)
def describe_endpoints(connection, endpoint_identifier):
    """Look up DMS endpoints whose endpoint-id matches the given identifier.

    Returns the raw describe_endpoints response; on a ClientError (e.g. no
    matching endpoint) an empty result shape is returned instead of raising.
    """
    try:
        matching_filter = {'Name': 'endpoint-id', 'Values': [endpoint_identifier]}
        return connection.describe_endpoints(Filters=[matching_filter])
    except botocore.exceptions.ClientError:
        # Treat lookup failures as "no endpoints found".
        return {'Endpoints': []}
@AWSRetry.backoff(**backoff_params)
def dms_delete_endpoint(client, **params):
    """Delete the DMS endpoint identified by the EndpointArn in ``params``.

    When the module's ``wait`` option is set, delegates to delete_dms_endpoint()
    which also waits for the deletion to complete.
    """
    if not module.params.get('wait'):
        return client.delete_endpoint(**params)
    return delete_dms_endpoint(client)
@AWSRetry.backoff(**backoff_params)
def dms_create_endpoint(client, **params):
    """Create a DMS endpoint (with retries); returns the create_endpoint response."""
    response = client.create_endpoint(**params)
    return response
@AWSRetry.backoff(**backoff_params)
def dms_modify_endpoint(client, **params):
    """Modify an existing DMS endpoint (with retries); returns the modify_endpoint response."""
    response = client.modify_endpoint(**params)
    return response
@AWSRetry.backoff(**backoff_params)
def get_endpoint_deleted_waiter(client):
    """Return the boto3 'endpoint_deleted' waiter for the given DMS client."""
    waiter = client.get_waiter('endpoint_deleted')
    return waiter
def endpoint_exists(endpoint):
    """Return True when the describe result contains at least one endpoint.

    :param endpoint: dict as returned by describe_endpoints()
    :return: bool
    """
    return len(endpoint['Endpoints']) > 0
def delete_dms_endpoint(connection):
    """Delete the endpoint named by the module's 'endpointidentifier'.

    When the module's ``wait`` option is set, blocks on the endpoint_deleted
    waiter before returning. Calls module.fail_json on AWS errors.

    :param connection: boto3 DMS client
    :returns: the delete_endpoint response
    """
    try:
        endpoint = describe_endpoints(connection,
                                      module.params.get('endpointidentifier'))
        # assumes the endpoint exists -- callers check endpoint_exists() first;
        # an empty 'Endpoints' list here would raise IndexError.
        endpoint_arn = endpoint['Endpoints'][0].get('EndpointArn')
        delete_arn = dict(
            EndpointArn=endpoint_arn
        )
        if module.params.get('wait'):
            delete_output = connection.delete_endpoint(**delete_arn)
            delete_waiter = get_endpoint_deleted_waiter(connection)
            # NOTE(review): 'timeout' is used as the per-poll Delay, not a total
            # timeout -- the total wait is roughly timeout * retries seconds.
            delete_waiter.wait(
                Filters=[{
                    'Name': 'endpoint-arn',
                    'Values': [endpoint_arn]
                }],
                WaiterConfig={
                    'Delay': module.params.get('timeout'),
                    'MaxAttempts': module.params.get('retries')
                }
            )
            return delete_output
        else:
            return connection.delete_endpoint(**delete_arn)
    except botocore.exceptions.ClientError as e:
        # Include the snake_cased AWS error response in the failure output.
        module.fail_json(msg="Failed to delete the DMS endpoint.",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Failed to delete the DMS endpoint.",
                         exception=traceback.format_exc())
def create_module_params():
    """Build the keyword arguments for create_endpoint/modify_endpoint from module params.

    Optional API fields are only included when the user supplied them, so they
    are omitted rather than sent as None.

    :return: dict of DMS API parameters
    """
    endpoint_parameters = dict(
        EndpointIdentifier=module.params.get('endpointidentifier'),
        EndpointType=module.params.get('endpointtype'),
        EngineName=module.params.get('enginename'),
        Username=module.params.get('username'),
        Password=module.params.get('password'),
        ServerName=module.params.get('servername'),
        Port=module.params.get('port'),
        DatabaseName=module.params.get('databasename'),
        SslMode=module.params.get('sslmode')
    )
    # EndpointArn is injected into module.params by main() when the endpoint
    # already exists, so modify_endpoint can address it.
    if module.params.get('EndpointArn'):
        endpoint_parameters['EndpointArn'] = module.params.get('EndpointArn')
    if module.params.get('certificatearn'):
        endpoint_parameters['CertificateArn'] = \
            module.params.get('certificatearn')
    if module.params.get('dmstransfersettings'):
        endpoint_parameters['DmsTransferSettings'] = \
            module.params.get('dmstransfersettings')
    if module.params.get('extraconnectionattributes'):
        endpoint_parameters['ExtraConnectionAttributes'] = \
            module.params.get('extraconnectionattributes')
    if module.params.get('kmskeyid'):
        endpoint_parameters['KmsKeyId'] = module.params.get('kmskeyid')
    if module.params.get('tags'):
        endpoint_parameters['Tags'] = module.params.get('tags')
    if module.params.get('serviceaccessrolearn'):
        endpoint_parameters['ServiceAccessRoleArn'] = \
            module.params.get('serviceaccessrolearn')
    if module.params.get('externaltabledefinition'):
        endpoint_parameters['ExternalTableDefinition'] = \
            module.params.get('externaltabledefinition')
    if module.params.get('dynamodbsettings'):
        endpoint_parameters['DynamoDbSettings'] = \
            module.params.get('dynamodbsettings')
    if module.params.get('s3settings'):
        endpoint_parameters['S3Settings'] = module.params.get('s3settings')
    if module.params.get('mongodbsettings'):
        endpoint_parameters['MongoDbSettings'] = \
            module.params.get('mongodbsettings')
    if module.params.get('kinesissettings'):
        endpoint_parameters['KinesisSettings'] = \
            module.params.get('kinesissettings')
    if module.params.get('elasticsearchsettings'):
        endpoint_parameters['ElasticsearchSettings'] = \
            module.params.get('elasticsearchsettings')
    # Bug fix: the original also copied the module-only options 'wait',
    # 'timeout' and 'retries' into this dict. Those are not DMS API
    # parameters: passing them through to create_endpoint/modify_endpoint
    # fails boto3 parameter validation, and compare_params() raised KeyError
    # on them. They are intentionally omitted here.
    return endpoint_parameters
def compare_params(param_described):
    """Return True when the described endpoint differs from the module parameters.

    'Password' is never compared because describe_endpoints does not return it
    (for security reasons).

    :param dict param_described: one entry from describe_endpoints()['Endpoints']
    :return: bool - True when a modify call is needed
    """
    modparams = create_module_params()
    for paramname, modvalue in modparams.items():
        if paramname == 'Password':
            # The API never returns the password, so it cannot be diffed.
            continue
        # Bug fix: the original condition mixed 'or'/'and' without parentheses
        # and indexed param_described unconditionally, raising KeyError for any
        # parameter absent from the API response.
        described_value = param_described.get(paramname)
        if described_value == modvalue:
            continue
        # Fall back to a case-insensitive string comparison, as some values
        # come back from the API with different casing.
        if str(described_value).lower() == modvalue:
            continue
        return True
    return False
def modify_dms_endpoint(connection):
    """Apply the module parameters to an existing endpoint via modify_endpoint.

    Calls module.fail_json on AWS errors.
    """
    try:
        return dms_modify_endpoint(connection, **create_module_params())
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to update DMS endpoint.",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Failed to update DMS endpoint.",
                         exception=traceback.format_exc())
def create_dms_endpoint(connection):
    """Create the DMS endpoint described by the module parameters.

    :param connection: boto3 DMS client
    :return: the create_endpoint response; calls module.fail_json on AWS errors
    """
    try:
        return dms_create_endpoint(connection, **create_module_params())
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to create DMS endpoint.",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Failed to create DMS endpoint.",
                         exception=traceback.format_exc())
def main():
    """Module entry point: create, update, or delete a DMS endpoint based on 'state'."""
    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        endpointidentifier=dict(required=True),
        endpointtype=dict(choices=['source', 'target'], required=True),
        enginename=dict(choices=['mysql', 'oracle', 'postgres', 'mariadb',
                                 'aurora', 'redshift', 's3', 'db2', 'azuredb',
                                 'sybase', 'dynamodb', 'mongodb', 'sqlserver'],
                        required=True),
        username=dict(),
        password=dict(no_log=True),
        servername=dict(),
        port=dict(type='int'),
        databasename=dict(),
        extraconnectionattributes=dict(),
        kmskeyid=dict(),
        tags=dict(type='dict'),
        certificatearn=dict(),
        sslmode=dict(choices=['none', 'require', 'verify-ca', 'verify-full'],
                     default='none'),
        serviceaccessrolearn=dict(),
        externaltabledefinition=dict(),
        dynamodbsettings=dict(type='dict'),
        s3settings=dict(type='dict'),
        dmstransfersettings=dict(type='dict'),
        mongodbsettings=dict(type='dict'),
        kinesissettings=dict(type='dict'),
        elasticsearchsettings=dict(type='dict'),
        wait=dict(type='bool', default=False),
        timeout=dict(type='int'),
        retries=dict(type='int')
    )
    global module
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[
            ["state", "absent", ["wait"]],
            # Bug fix: 'wait' is a bool, so the condition value must be the
            # boolean True. The original string "True" never matched, which
            # silently skipped the documented timeout/retries requirement.
            ["wait", True, ["timeout"]],
            ["wait", True, ["retries"]],
        ],
        supports_check_mode=False
    )
    exit_message = None
    changed = False
    state = module.params.get('state')
    dmsclient = module.client('dms')
    endpoint = describe_endpoints(dmsclient,
                                  module.params.get('endpointidentifier'))
    if state == 'present':
        if endpoint_exists(endpoint):
            # Stash the ARN so create_module_params() includes it for modify calls.
            module.params['EndpointArn'] = \
                endpoint['Endpoints'][0].get('EndpointArn')
            params_changed = compare_params(endpoint["Endpoints"][0])
            if params_changed:
                updated_dms = modify_dms_endpoint(dmsclient)
                exit_message = updated_dms
                changed = True
            else:
                module.exit_json(changed=False, msg="Endpoint Already Exists")
        else:
            dms_properties = create_dms_endpoint(dmsclient)
            exit_message = dms_properties
            changed = True
    elif state == 'absent':
        if endpoint_exists(endpoint):
            delete_results = delete_dms_endpoint(dmsclient)
            exit_message = delete_results
            changed = True
        else:
            changed = False
            exit_message = 'DMS Endpoint does not exist'
    module.exit_json(changed=changed, msg=exit_message)


if __name__ == '__main__':
    main()

@ -1,238 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dms_replication_subnet_group
short_description: creates or destroys a data migration services subnet group
description:
- Creates or destroys a data migration services subnet group.
version_added: "2.9"
options:
state:
description:
- State of the subnet group.
default: present
choices: ['present', 'absent']
type: str
identifier:
description:
- The name for the replication subnet group.
This value is stored as a lowercase string.
Must contain no more than 255 alphanumeric characters,
periods, spaces, underscores, or hyphens. Must not be "default".
type: str
required: true
description:
description:
- The description for the subnet group.
type: str
required: true
subnet_ids:
description:
- A list containing the subnet ids for the replication subnet group,
needs to be at least 2 items in the list.
type: list
elements: str
required: true
author:
- "Rui Moreira (@ruimoreira)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- dms_replication_subnet_group:
state: present
identifier: "dev-sngroup"
description: "Development Subnet Group asdasdas"
subnet_ids: ['subnet-id1','subnet-id2']
'''
RETURN = ''' # '''
import traceback
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
backoff_params = dict(tries=5, delay=1, backoff=1.5)
@AWSRetry.backoff(**backoff_params)
def describe_subnet_group(connection, subnet_group):
    """Describe replication subnet groups matching the given identifier.

    On a ClientError an empty result shape is returned instead of raising.
    """
    try:
        group_filter = {'Name': 'replication-subnet-group-id',
                        'Values': [subnet_group]}
        return connection.describe_replication_subnet_groups(Filters=[group_filter])
    except botocore.exceptions.ClientError:
        # Treat lookup failures as "no subnet groups found".
        return {'ReplicationSubnetGroups': []}
@AWSRetry.backoff(**backoff_params)
def replication_subnet_group_create(connection, **params):
    """Create a replication subnet group, retrying on transient errors."""
    response = connection.create_replication_subnet_group(**params)
    return response
@AWSRetry.backoff(**backoff_params)
def replication_subnet_group_modify(connection, **modify_params):
    """Modify an existing replication subnet group, retrying on transient errors."""
    response = connection.modify_replication_subnet_group(**modify_params)
    return response
@AWSRetry.backoff(**backoff_params)
def replication_subnet_group_delete(module, connection):
    """Delete the replication subnet group named by the module's 'identifier'."""
    group_id = module.params.get('identifier')
    return connection.delete_replication_subnet_group(
        ReplicationSubnetGroupIdentifier=group_id)
def replication_subnet_exists(subnet):
    """Return True when the describe result contains at least one subnet group.

    :param subnet: dict as returned by describe_subnet_group()
    :return: bool
    """
    return len(subnet['ReplicationSubnetGroups']) > 0
def create_module_params(module):
    """Translate the module parameters into create/modify API keyword arguments.

    :param module: AnsibleAWSModule instance
    :return: dict suitable for (create|modify)_replication_subnet_group
    """
    # The API lower-cases the identifier anyway; do it up front so comparisons
    # against described groups behave consistently.
    return dict(
        ReplicationSubnetGroupIdentifier=module.params.get('identifier').lower(),
        ReplicationSubnetGroupDescription=module.params.get('description'),
        SubnetIds=module.params.get('subnet_ids'),
    )
def compare_params(module, param_described):
    """Return True when the described subnet group differs from the module params.

    :param module: AnsibleAWSModule instance
    :param dict param_described: one entry from
        describe_subnet_group()['ReplicationSubnetGroups'] -- mutated in place
        (VpcId / SubnetGroupStatus are popped before comparing)
    :return: bool - True when a modify call is needed
    """
    modparams = create_module_params(module)
    changed = False
    # need to sanitize values that get returned from the API
    if 'VpcId' in param_described.keys():
        param_described.pop('VpcId')
    if 'SubnetGroupStatus' in param_described.keys():
        param_described.pop('SubnetGroupStatus')
    for paramname in modparams.keys():
        if paramname in param_described.keys() and \
                param_described.get(paramname) == modparams[paramname]:
            pass
        elif paramname == 'SubnetIds':
            # The API reports subnets under 'Subnets' as dicts, not a plain
            # 'SubnetIds' list, so flatten before comparing.
            subnets = []
            for subnet in param_described.get('Subnets'):
                subnets.append(subnet.get('SubnetIdentifier'))
            # NOTE(review): only checks that every requested subnet is present;
            # extra subnets attached in AWS but absent from the module list do
            # not mark the group as changed -- confirm whether that is intended.
            for modulesubnet in modparams['SubnetIds']:
                if modulesubnet in subnets:
                    pass
        else:
            # Any other missing or differing parameter forces an update.
            changed = True
    return changed
def create_replication_subnet_group(module, connection):
    """Create the subnet group described by the module parameters.

    Calls module.fail_json on AWS errors.
    """
    try:
        return replication_subnet_group_create(connection, **create_module_params(module))
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to create DMS replication subnet group.",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Failed to create DMS replication subnet group.",
                         exception=traceback.format_exc())
def modify_replication_subnet_group(module, connection):
    """Apply the module parameters to an existing replication subnet group.

    Calls module.fail_json on AWS errors.
    """
    try:
        return replication_subnet_group_modify(connection, **create_module_params(module))
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to Modify the DMS replication subnet group.",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Failed to Modify the DMS replication subnet group.",
                         exception=traceback.format_exc())
def main():
    """Module entry point: create, update, or delete a DMS replication subnet group."""
    argument_spec = dict(
        state=dict(type='str', choices=['present', 'absent'], default='present'),
        identifier=dict(type='str', required=True),
        description=dict(type='str', required=True),
        subnet_ids=dict(type='list', elements='str', required=True),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    exit_message = None
    changed = False
    state = module.params.get('state')
    dmsclient = module.client('dms')
    subnet_group = describe_subnet_group(dmsclient,
                                         module.params.get('identifier'))
    if state == 'present':
        if replication_subnet_exists(subnet_group):
            if compare_params(module, subnet_group["ReplicationSubnetGroups"][0]):
                if not module.check_mode:
                    exit_message = modify_replication_subnet_group(module, dmsclient)
                else:
                    # Bug fix: the original assigned the boto3 client object to
                    # exit_message in check mode; report a descriptive string.
                    exit_message = "Check mode enabled: subnet group would be modified"
                changed = True
            else:
                exit_message = "No changes to Subnet group"
        else:
            if not module.check_mode:
                exit_message = create_replication_subnet_group(module, dmsclient)
                changed = True
            else:
                exit_message = "Check mode enabled"
    elif state == 'absent':
        if replication_subnet_exists(subnet_group):
            if not module.check_mode:
                replication_subnet_group_delete(module, dmsclient)
                changed = True
                exit_message = "Replication subnet group Deleted"
            else:
                # Bug fix: same as above -- don't leak the client object as msg.
                exit_message = "Check mode enabled: subnet group would be deleted"
                changed = True
        else:
            changed = False
            exit_message = "Replication subnet group does not exist"
    module.exit_json(changed=changed, msg=exit_message)


if __name__ == '__main__':
    main()

@ -1,522 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dynamodb_table
short_description: Create, update or delete AWS Dynamo DB tables
version_added: "2.0"
description:
- Create or delete AWS Dynamo DB tables.
- Can update the provisioned throughput on existing tables.
- Returns the status of the specified table.
author: Alan Loi (@loia)
requirements:
- "boto >= 2.37.0"
- "boto3 >= 1.4.4 (for tagging)"
options:
state:
description:
- Create or delete the table.
choices: ['present', 'absent']
default: 'present'
type: str
name:
description:
- Name of the table.
required: true
type: str
hash_key_name:
description:
- Name of the hash key.
- Required when C(state=present).
type: str
hash_key_type:
description:
- Type of the hash key.
choices: ['STRING', 'NUMBER', 'BINARY']
default: 'STRING'
type: str
range_key_name:
description:
- Name of the range key.
type: str
range_key_type:
description:
- Type of the range key.
choices: ['STRING', 'NUMBER', 'BINARY']
default: 'STRING'
type: str
read_capacity:
description:
- Read throughput capacity (units) to provision.
default: 1
type: int
write_capacity:
description:
- Write throughput capacity (units) to provision.
default: 1
type: int
indexes:
description:
- list of dictionaries describing indexes to add to the table. global indexes can be updated. local indexes don't support updates or have throughput.
- "required options: ['name', 'type', 'hash_key_name']"
- "other options: ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']"
suboptions:
name:
description: The name of the index.
type: str
required: true
type:
description:
- The type of index.
- "Valid types: C(all), C(global_all), C(global_include), C(global_keys_only), C(include), C(keys_only)"
type: str
required: true
hash_key_name:
description: The name of the hash-based key.
required: true
type: str
hash_key_type:
description: The type of the hash-based key.
type: str
range_key_name:
description: The name of the range-based key.
type: str
range_key_type:
type: str
description: The type of the range-based key.
includes:
type: list
description: A list of fields to include when using C(global_include) or C(include) indexes.
read_capacity:
description:
- Read throughput capacity (units) to provision for the index.
type: int
write_capacity:
description:
- Write throughput capacity (units) to provision for the index.
type: int
default: []
version_added: "2.1"
type: list
elements: dict
tags:
version_added: "2.4"
description:
- A hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag.
- 'For example: C({"key":"value"}) and C({"key":"value","key2":"value2"})'
type: dict
wait_for_active_timeout:
version_added: "2.4"
description:
- how long before wait gives up, in seconds. only used when tags is set
default: 60
type: int
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Create dynamo table with hash and range primary key
- dynamodb_table:
name: my-table
region: us-east-1
hash_key_name: id
hash_key_type: STRING
range_key_name: create_time
range_key_type: NUMBER
read_capacity: 2
write_capacity: 2
tags:
tag_name: tag_value
# Update capacity on existing dynamo table
- dynamodb_table:
name: my-table
region: us-east-1
read_capacity: 10
write_capacity: 10
# set index on existing dynamo table
- dynamodb_table:
name: my-table
region: us-east-1
indexes:
- name: NamedIndex
type: global_include
hash_key_name: id
range_key_name: create_time
includes:
- other_field
- other_field2
read_capacity: 10
write_capacity: 10
# Delete dynamo table
- dynamodb_table:
name: my-table
region: us-east-1
state: absent
'''
RETURN = '''
table_status:
description: The current status of the table.
returned: success
type: str
sample: ACTIVE
'''
import time
import traceback
try:
import boto
import boto.dynamodb2
from boto.dynamodb2.table import Table
from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex, GlobalAllIndex, GlobalIncludeIndex, GlobalKeysOnlyIndex, IncludeIndex, KeysOnlyIndex
from boto.dynamodb2.types import STRING, NUMBER, BINARY
from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError
from boto.dynamodb2.exceptions import ValidationException
HAS_BOTO = True
DYNAMO_TYPE_MAP = {
'STRING': STRING,
'NUMBER': NUMBER,
'BINARY': BINARY
}
except ImportError:
HAS_BOTO = False
try:
import botocore
from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_conn
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
DYNAMO_TYPE_DEFAULT = 'STRING'
INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name']
INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']
INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']
def create_or_update_dynamo_table(connection, module, boto3_dynamodb=None, boto3_sts=None, region=None):
    """Ensure the DynamoDB table described by module.params exists as requested.

    Creates the table when absent, otherwise reconciles throughput and global
    indexes. Optionally tags the table (requires the boto3 clients). Always
    exits the module: exit_json on success, fail_json on BotoServerError.
    """
    table_name = module.params.get('name')
    hash_key_name = module.params.get('hash_key_name')
    hash_key_type = module.params.get('hash_key_type')
    range_key_name = module.params.get('range_key_name')
    range_key_type = module.params.get('range_key_type')
    read_capacity = module.params.get('read_capacity')
    write_capacity = module.params.get('write_capacity')
    all_indexes = module.params.get('indexes')
    tags = module.params.get('tags')
    wait_for_active_timeout = module.params.get('wait_for_active_timeout')
    # reject malformed index definitions before making any AWS calls
    for index in all_indexes:
        validate_index(index, module)
    # boto2 schema objects for the primary hash/range keys
    schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type)
    throughput = {
        'read': read_capacity,
        'write': write_capacity
    }
    indexes, global_indexes = get_indexes(all_indexes)
    # result echoes the requested configuration back to the caller
    result = dict(
        region=region,
        table_name=table_name,
        hash_key_name=hash_key_name,
        hash_key_type=hash_key_type,
        range_key_name=range_key_name,
        range_key_type=range_key_type,
        read_capacity=read_capacity,
        write_capacity=write_capacity,
        indexes=all_indexes,
    )
    try:
        table = Table(table_name, connection=connection)
        if dynamo_table_exists(table):
            # existing table: reconcile throughput and global indexes
            result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
        else:
            if not module.check_mode:
                Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes)
            result['changed'] = True
        # describe() needs a real table, so it is skipped in check mode
        if not module.check_mode:
            result['table_status'] = table.describe()['Table']['TableStatus']
        if tags:
            # only tables which are active can be tagged
            wait_until_table_active(module, table, wait_for_active_timeout)
            account_id = get_account_id(boto3_sts)
            # build the table ARN by hand; the boto2 Table object does not expose it
            boto3_dynamodb.tag_resource(
                ResourceArn='arn:aws:dynamodb:' +
                region +
                ':' +
                account_id +
                ':table/' +
                table_name,
                Tags=ansible_dict_to_boto3_tag_list(tags))
            result['tags'] = tags
    except BotoServerError:
        result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def get_account_id(boto3_sts):
    """Return the AWS account id for the current credentials, looked up via STS."""
    identity = boto3_sts.get_caller_identity()
    return identity["Account"]
def wait_until_table_active(module, table, wait_timeout):
    """Poll table.describe() until TableStatus is ACTIVE.

    wait_timeout is in seconds; on expiry the module is failed.
    """
    deadline = time.time() + wait_timeout
    # keep polling (5s apart) while time remains and the table is not ACTIVE;
    # the time check runs first so describe() is not called after the deadline
    while time.time() < deadline and table.describe()['Table']['TableStatus'] != 'ACTIVE':
        time.sleep(5)
    if time.time() >= deadline:
        # waiting took too long
        module.fail_json(msg="timed out waiting for table to exist")
def delete_dynamo_table(connection, module):
    """Delete the named table when it exists; report changed and exit the module."""
    table_name = module.params.get('name')
    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
    )
    try:
        table = Table(table_name, connection=connection)
        exists = dynamo_table_exists(table)
        # in check mode we report the change without performing the delete
        if exists and not module.check_mode:
            table.delete()
        result['changed'] = exists
    except BotoServerError:
        result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def dynamo_table_exists(table):
    """Return True when table.describe() succeeds, False when DynamoDB reports it missing.

    Any other JSONResponseError is re-raised.
    """
    try:
        table.describe()
    except JSONResponseError as e:
        # boto signals a missing table with this message prefix
        if e.message and e.message.startswith('Requested resource not found'):
            return False
        raise e
    else:
        return True
def update_dynamo_table(table, throughput=None, check_mode=False, global_indexes=None):
    """Reconcile an existing table's throughput and global secondary indexes.

    Returns True when anything changed (or would change, in check mode).
    """
    table.describe()  # populate table details
    throughput_changed = False
    global_indexes_changed = False
    # table-level read/write capacity
    if has_throughput_changed(table, throughput):
        if not check_mode:
            throughput_changed = table.update(throughput=throughput)
        else:
            throughput_changed = True
    # diff declared global indexes against what the table currently carries
    removed_indexes, added_indexes, index_throughput_changes = get_changed_global_indexes(table, global_indexes)
    if removed_indexes:
        if not check_mode:
            for name, index in removed_indexes.items():
                global_indexes_changed = table.delete_global_secondary_index(name) or global_indexes_changed
        else:
            global_indexes_changed = True
    if added_indexes:
        if not check_mode:
            for name, index in added_indexes.items():
                global_indexes_changed = table.create_global_secondary_index(global_index=index) or global_indexes_changed
        else:
            global_indexes_changed = True
    if index_throughput_changes:
        if not check_mode:
            # todo: remove try once boto has https://github.com/boto/boto/pull/3447 fixed
            try:
                global_indexes_changed = table.update_global_secondary_index(global_indexes=index_throughput_changes) or global_indexes_changed
            except ValidationException:
                pass
        else:
            global_indexes_changed = True
    return throughput_changed or global_indexes_changed
def has_throughput_changed(table, new_throughput):
    """Return True if the desired read/write capacity differs from the table's current throughput."""
    if not new_throughput:
        return False
    current = table.throughput
    return any(new_throughput[key] != current[key] for key in ('read', 'write'))
def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type):
    """Build the boto2 key schema: a HashKey plus an optional RangeKey.

    Unrecognised key types fall back to DYNAMO_TYPE_DEFAULT (STRING).
    """
    def resolve(type_name):
        # map the module's type string onto the boto2 type constant
        return DYNAMO_TYPE_MAP.get(type_name, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])

    schema = [HashKey(hash_key_name, resolve(hash_key_type))]
    if range_key_name:
        schema.append(RangeKey(range_key_name, resolve(range_key_type)))
    return schema
def get_changed_global_indexes(table, global_indexes):
    """Diff the desired global indexes against the table's current ones.

    Returns a tuple (removed_indexes, added_indexes, index_throughput_changes):
    removed_indexes maps name -> schema of indexes present on the table but not
    requested; added_indexes maps name -> index object to create; and
    index_throughput_changes maps name -> desired throughput for indexes kept
    in place (see the boto workaround note below).
    """
    table.describe()
    # name -> schema / name -> object lookups for current (table_*) and desired (set_*) indexes
    table_index_info = dict((index.name, index.schema()) for index in table.global_indexes)
    table_index_objects = dict((index.name, index) for index in table.global_indexes)
    set_index_info = dict((index.name, index.schema()) for index in global_indexes)
    set_index_objects = dict((index.name, index) for index in global_indexes)
    removed_indexes = dict((name, index) for name, index in table_index_info.items() if name not in set_index_info)
    added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.items() if name not in table_index_info)
    # todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed
    # for name, index in set_index_objects.items():
    # if (name not in added_indexes and
    # (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or
    # index.throughput['write'] != str(table_index_objects[name].throughput['write']))):
    # index_throughput_changes[name] = index.throughput
    # todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed
    index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes)
    return removed_indexes, added_indexes, index_throughput_changes
def validate_index(index, module):
    """Fail the module when an index definition has unknown keys, missing required keys, or a bad type."""
    # unknown options first
    for option in index:
        if option not in INDEX_OPTIONS:
            module.fail_json(msg='%s is not a valid option for an index' % option)
    # then missing required options
    for required_option in INDEX_REQUIRED_OPTIONS:
        if required_option not in index:
            module.fail_json(msg='%s is a required option for an index' % required_option)
    # finally the index type itself
    if index['type'] not in INDEX_TYPE_OPTIONS:
        module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))
def get_indexes(all_indexes):
    """Translate index definitions into boto2 index objects.

    Returns (local_indexes, global_indexes); the global index types receive a
    throughput dict defaulting to 1 read / 1 write.
    """
    local_indexes = []
    global_indexes = []
    for index in all_indexes:
        index_name = index['name']
        index_type = index['type']
        schema = get_schema_param(index.get('hash_key_name'), index.get('hash_key_type'),
                                  index.get('range_key_name'), index.get('range_key_type'))
        throughput = {
            'read': index.get('read_capacity', 1),
            'write': index.get('write_capacity', 1),
        }
        if index_type == 'all':
            local_indexes.append(AllIndex(index_name, parts=schema))
        elif index_type == 'global_all':
            global_indexes.append(GlobalAllIndex(index_name, parts=schema, throughput=throughput))
        elif index_type == 'global_include':
            global_indexes.append(GlobalIncludeIndex(index_name, parts=schema, throughput=throughput, includes=index['includes']))
        elif index_type == 'global_keys_only':
            global_indexes.append(GlobalKeysOnlyIndex(index_name, parts=schema, throughput=throughput))
        elif index_type == 'include':
            local_indexes.append(IncludeIndex(index_name, parts=schema, includes=index['includes']))
        elif index_type == 'keys_only':
            local_indexes.append(KeysOnlyIndex(index_name, parts=schema))
    return local_indexes, global_indexes
def main():
    """Module entry point: build the argument spec, connect to AWS, and dispatch on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        hash_key_name=dict(type='str'),
        hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        range_key_name=dict(type='str'),
        range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        read_capacity=dict(default=1, type='int'),
        write_capacity=dict(default=1, type='int'),
        indexes=dict(default=[], type='list'),
        tags=dict(type='dict'),
        wait_for_active_timeout=dict(default=60, type='int'),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)
    # boto (v2) drives table create/update/delete
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    # tagging is implemented with boto3, so it is only required when tags are used
    if not HAS_BOTO3 and module.params.get('tags'):
        module.fail_json(msg='boto3 required when using tags for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')
    try:
        connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))
    # boto3 clients are only built when tags were supplied
    if module.params.get('tags'):
        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            boto3_dynamodb = boto3_conn(module, conn_type='client', resource='dynamodb', region=region, endpoint=ec2_url, **aws_connect_kwargs)
            # tag_resource appeared in a later boto3; fail early with a clear message
            if not hasattr(boto3_dynamodb, 'tag_resource'):
                module.fail_json(msg='boto3 connection does not have tag_resource(), likely due to using an old version')
            boto3_sts = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except botocore.exceptions.NoCredentialsError as e:
            module.fail_json(msg='cannot connect to AWS', exception=traceback.format_exc())
    else:
        boto3_dynamodb = None
        boto3_sts = None
    state = module.params.get('state')
    if state == 'present':
        create_or_update_dynamo_table(connection, module, boto3_dynamodb, boto3_sts, region)
    elif state == 'absent':
        delete_dynamo_table(connection, module)
if __name__ == '__main__':
    main()

@ -1,174 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dynamodb_ttl
short_description: Set TTL for a given DynamoDB table
description:
- Uses boto3 to set TTL.
- Requires botocore version 1.5.24 or higher.
version_added: "2.4"
options:
state:
description:
- State to set DynamoDB table to.
choices: ['enable', 'disable']
required: false
type: str
table_name:
description:
- Name of the DynamoDB table to work on.
required: true
type: str
attribute_name:
description:
- The name of the Time To Live attribute used to store the expiration time for items in the table.
- This appears to be required by the API even when disabling TTL.
required: true
type: str
author: Ted Timmons (@tedder)
extends_documentation_fragment:
- aws
- ec2
requirements: [ botocore>=1.5.24, boto3 ]
'''
EXAMPLES = '''
- name: enable TTL on my cowfacts table
dynamodb_ttl:
state: enable
table_name: cowfacts
attribute_name: cow_deleted_date
- name: disable TTL on my cowfacts table
dynamodb_ttl:
state: disable
table_name: cowfacts
attribute_name: cow_deleted_date
'''
RETURN = '''
current_status:
description: current or new TTL specification.
type: dict
returned: always
sample:
- { "AttributeName": "deploy_timestamp", "TimeToLiveStatus": "ENABLED" }
- { "AttributeName": "deploy_timestamp", "Enabled": true }
'''
import distutils.version
import traceback
try:
import botocore
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info
def get_current_ttl_state(c, table_name):
    """Return the TimeToLiveDescription dict for a table (None when absent)."""
    response = c.describe_time_to_live(TableName=table_name)
    return response.get('TimeToLiveDescription')
def does_state_need_changing(attribute_name, desired_state, current_spec):
    """Dirty check: True when the table's TTL spec differs from the desired one."""
    if not current_spec:
        # no TTL description at all (or no table) -> must configure
        return True
    status = current_spec.get('TimeToLiveStatus')
    desired = desired_state.lower()
    if desired == 'enable' and status not in ('ENABLING', 'ENABLED'):
        return True
    if desired == 'disable' and status not in ('DISABLING', 'DISABLED'):
        return True
    # even with the right status, a different attribute name needs a change
    if current_spec.get('AttributeName') != attribute_name:
        return True
    return False
def set_ttl_state(c, table_name, state, attribute_name):
    """Apply the TTL specification to a table.

    Returns the update_time_to_live response's TimeToLiveSpecification dict,
    which has a different shape than the describe_* call.
    """
    enabled = state.lower() == 'enable'
    response = c.update_time_to_live(
        TableName=table_name,
        TimeToLiveSpecification={
            'Enabled': enabled,
            'AttributeName': attribute_name,
        }
    )
    return response.get('TimeToLiveSpecification')
def main():
    """Module entry point for dynamodb_ttl: validate dependencies, connect, and reconcile TTL."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(choices=['enable', 'disable']),
        table_name=dict(required=True),
        attribute_name=dict(required=True))
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    elif distutils.version.StrictVersion(botocore.__version__) < distutils.version.StrictVersion('1.5.24'):
        # TTL was added in this version.
        module.fail_json(msg='Found botocore in version {0}, but >= {1} is required for TTL support'.format(botocore.__version__, '1.5.24'))
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        dbclient = boto3_conn(module, conn_type='client', resource='dynamodb', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=str(e))
    result = {'changed': False}
    state = module.params['state']
    # wrap all our calls to catch the standard exceptions. We don't pass `module` in to the
    # methods so it's easier to do here.
    try:
        current_state = get_current_ttl_state(dbclient, module.params['table_name'])
        if does_state_need_changing(module.params['attribute_name'], module.params['state'], current_state):
            # changes needed
            new_state = set_ttl_state(dbclient, module.params['table_name'], module.params['state'], module.params['attribute_name'])
            result['current_status'] = new_state
            result['changed'] = True
        else:
            # no changes needed
            result['current_status'] = current_state
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.ParamValidationError as e:
        module.fail_json(msg=e.message, exception=traceback.format_exc())
    except ValueError as e:
        module.fail_json(msg=str(e))
    module.exit_json(**result)
if __name__ == '__main__':
    main()

@ -1,226 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_ami_copy
short_description: copies AMI between AWS regions, return new image id
description:
- Copies AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.)
version_added: "2.0"
options:
source_region:
description:
- The source region the AMI should be copied from.
required: true
type: str
source_image_id:
description:
- The ID of the AMI in source region that should be copied.
required: true
type: str
name:
description:
- The name of the new AMI to copy. (As of 2.3 the default is 'default', in prior versions it was 'null'.)
default: "default"
type: str
description:
description:
- An optional human-readable string describing the contents and purpose of the new AMI.
type: str
encrypted:
description:
- Whether or not the destination snapshots of the copied AMI should be encrypted.
version_added: "2.2"
type: bool
kms_key_id:
description:
- KMS key id used to encrypt the image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
version_added: "2.2"
type: str
wait:
description:
- Wait for the copied AMI to be in state 'available' before returning.
type: bool
default: 'no'
wait_timeout:
description:
- How long before wait gives up, in seconds. Prior to 2.3 the default was 1200.
- From 2.3-2.5 this option was deprecated in favor of boto3 waiter defaults.
This was reenabled in 2.6 to allow timeouts greater than 10 minutes.
default: 600
type: int
tags:
description:
- 'A hash/dictionary of tags to add to the new copied AMI: C({"key":"value"}) and C({"key":"value","key":"value"})'
type: dict
tag_equality:
description:
- Whether to use tags if the source AMI already exists in the target region. If this is set, and all tags match
in an existing AMI, the AMI will not be copied again.
default: false
type: bool
version_added: 2.6
author:
- Amir Moulavi (@amir343) <amir.moulavi@gmail.com>
- Tim C (@defunctio) <defunct@defunct.io>
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
'''
EXAMPLES = '''
# Basic AMI Copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
# AMI copy wait until available
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
wait: yes
wait_timeout: 1200 # Default timeout is 600
register: image_id
# Named AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
name: My-Awesome-AMI
description: latest patch
# Tagged AMI copy (will not copy the same AMI twice)
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
tags:
Name: My-Super-AMI
Patch: 1.2.3
tag_equality: yes
# Encrypted AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
encrypted: yes
# Encrypted AMI copy with specified key
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
encrypted: yes
kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
'''
RETURN = '''
image_id:
description: AMI ID of the copied AMI
returned: always
type: str
sample: ami-e689729e
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list
from ansible.module_utils._text import to_native
try:
from botocore.exceptions import ClientError, NoCredentialsError, WaiterError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
def copy_image(module, ec2):
    """
    Copies an AMI from the source region into the connected region.

    module : AnsibleModule object
    ec2: ec2 connection object (boto3 client)

    When tag_equality is set and an AMI with the same tags already exists in
    the target region (state available/pending), no copy is made and changed
    stays False. Exits the module via exit_json/fail_json.
    """
    image = None
    changed = False
    tags = module.params.get('tags')
    params = {'SourceRegion': module.params.get('source_region'),
              'SourceImageId': module.params.get('source_image_id'),
              'Name': module.params.get('name'),
              'Description': module.params.get('description'),
              'Encrypted': module.params.get('encrypted'),
              }
    # KmsKeyId is only passed when explicitly set
    if module.params.get('kms_key_id'):
        params['KmsKeyId'] = module.params.get('kms_key_id')
    try:
        if module.params.get('tag_equality'):
            # look for an existing AMI in the target region carrying exactly these tags
            filters = [{'Name': 'tag:%s' % k, 'Values': [v]} for (k, v) in module.params.get('tags').items()]
            filters.append(dict(Name='state', Values=['available', 'pending']))
            images = ec2.describe_images(Filters=filters)
            if len(images['Images']) > 0:
                image = images['Images'][0]
        if not image:
            # no matching AMI found (or tag_equality unset): perform the copy
            image = ec2.copy_image(**params)
            image_id = image['ImageId']
            if tags:
                ec2.create_tags(Resources=[image_id],
                                Tags=ansible_dict_to_boto3_tag_list(tags))
            changed = True
        if module.params.get('wait'):
            delay = 15
            # number of polls that fit inside wait_timeout seconds
            max_attempts = module.params.get('wait_timeout') // delay
            image_id = image.get('ImageId')
            ec2.get_waiter('image_available').wait(
                ImageIds=[image_id],
                WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
            )
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(image))
    except WaiterError as e:
        module.fail_json_aws(e, msg='An error occurred waiting for the image to become available')
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Could not copy AMI")
    except Exception as e:
        module.fail_json(msg='Unhandled exception. (%s)' % to_native(e))
def main():
    """Module entry point for ec2_ami_copy: build the arg spec, connect, and copy."""
    argument_spec = dict(
        source_region=dict(required=True),
        source_image_id=dict(required=True),
        name=dict(default='default'),
        description=dict(default=''),
        encrypted=dict(type='bool', default=False, required=False),
        kms_key_id=dict(type='str', required=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=600),
        tags=dict(type='dict'),
        tag_equality=dict(type='bool', default=False))
    module = AnsibleAWSModule(argument_spec=argument_spec)
    # TODO: Check botocore version
    ec2 = module.client('ec2')
    # copy_image exits the module itself
    copy_image(module, ec2)
if __name__ == '__main__':
    main()

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save