Remove incidental_inventory_aws_ec2 and supporting plugins (#77877)

* Remove aws/2.7 and aws/3.6 from incidental test matrix entries

* Remove aws.sh symlink
pull/77942/head
Sloane Hertel 2 years ago committed by GitHub
parent bd849b3076
commit e6075109d0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -183,10 +183,6 @@ stages:
test: ios/csr1000v/
- name: VyOS Python
test: vyos/1.1.8/
- name: AWS Python 2.7
test: aws/2.7
- name: AWS Python 3.6
test: aws/3.6
- name: Cloud Python
test: cloud/
- stage: Summary

@ -1,3 +0,0 @@
cloud/aws
shippable/aws/incidental
context/controller

@ -1,11 +0,0 @@
---
# Writes an aws_ec2 inventory config file (../test.aws_ec2.yml) rendered
# from a template under ../templates/; the template is selectable via the
# `template` extra var and defaults to inventory.yml.
- hosts: 127.0.0.1
  connection: local
  gather_facts: no
  vars:
    template_name: "../templates/{{ template | default('inventory.yml') }}"
  tasks:
    - name: write inventory config file
      copy:
        dest: ../test.aws_ec2.yml
        content: "{{ lookup('template', template_name) }}"

@ -1,9 +0,0 @@
---
# Truncates the aws_ec2 inventory config file so subsequent runs start
# from (and can test) an empty configuration.
- hosts: 127.0.0.1
  connection: local
  gather_facts: no
  tasks:
    - name: write inventory config file
      copy:
        dest: ../test.aws_ec2.yml
        content: ""

@ -1,64 +0,0 @@
---
# Populates the aws_ec2 inventory cache: creates an EC2 instance, refreshes
# inventory (filling the cache), then terminates the instance WITHOUT
# refreshing again — leaving a deliberately stale cache for the next play.
- hosts: 127.0.0.1
  connection: local
  gather_facts: no
  environment: "{{ ansible_test.environment }}"
  tasks:
    - block:
        # Create VPC, subnet, security group, and find image_id to create instance
        - include_tasks: setup.yml

        - name: assert group was populated with inventory but is empty
          assert:
            that:
              - "'aws_ec2' in groups"
              - "not groups.aws_ec2"

        # Create new host, add it to inventory and then terminate it without updating the cache
        - name: set connection information for all tasks
          set_fact:
            aws_connection_info: &aws_connection_info
              aws_access_key: '{{ aws_access_key }}'
              aws_secret_key: '{{ aws_secret_key }}'
              security_token: '{{ security_token }}'
              region: '{{ aws_region }}'
          no_log: yes

        - name: create a new host
          ec2:
            image: '{{ image_id }}'
            exact_count: 1
            count_tag:
              Name: '{{ resource_prefix }}'
            instance_tags:
              Name: '{{ resource_prefix }}'
            instance_type: t2.micro
            wait: yes
            group_id: '{{ sg_id }}'
            vpc_subnet_id: '{{ subnet_id }}'
            <<: *aws_connection_info
          register: setup_instance

        - meta: refresh_inventory

      always:
        # Best-effort cleanup of the instance and supporting resources.
        - name: remove setup ec2 instance
          ec2:
            instance_type: t2.micro
            instance_ids: '{{ setup_instance.instance_ids }}'
            state: absent
            wait: yes
            instance_tags:
              Name: '{{ resource_prefix }}'
            group_id: '{{ sg_id }}'
            vpc_subnet_id: '{{ subnet_id }}'
            <<: *aws_connection_info
          ignore_errors: yes
          when: setup_instance is defined

        - include_tasks: tear_down.yml

@ -1,62 +0,0 @@
# Shared setup tasks: discover an AMI and create the VPC, subnet, and
# security group needed to launch a test EC2 instance. Exports the facts
# image_id, vpc_id, subnet_id and sg_id for the calling play.
- name: set connection information for all tasks
  set_fact:
    aws_connection_info: &aws_connection_info
      aws_access_key: '{{ aws_access_key }}'
      aws_secret_key: '{{ aws_secret_key }}'
      security_token: '{{ security_token }}'
      region: '{{ aws_region }}'
  no_log: yes

- name: get image ID to create an instance
  ec2_ami_info:
    filters:
      architecture: x86_64
      # owner-id is numeric but must stay a string for the EC2 API filter
      owner-id: '125523088429'
      virtualization-type: hvm
      root-device-type: ebs
      name: 'Fedora-Atomic-27*'
    <<: *aws_connection_info
  register: fedora_images

- set_fact:
    image_id: '{{ fedora_images.images.0.image_id }}'

- name: create a VPC to work in
  ec2_vpc_net:
    cidr_block: 10.10.0.0/24
    state: present
    name: '{{ resource_prefix }}_setup'
    resource_tags:
      Name: '{{ resource_prefix }}_setup'
    <<: *aws_connection_info
  register: setup_vpc

- set_fact:
    vpc_id: '{{ setup_vpc.vpc.id }}'

- name: create a subnet to use for creating an ec2 instance
  ec2_vpc_subnet:
    az: '{{ aws_region }}a'
    tags: '{{ resource_prefix }}_setup'
    vpc_id: '{{ setup_vpc.vpc.id }}'
    cidr: 10.10.0.0/24
    state: present
    resource_tags:
      Name: '{{ resource_prefix }}_setup'
    <<: *aws_connection_info
  register: setup_subnet

- set_fact:
    subnet_id: '{{ setup_subnet.subnet.id }}'

- name: create a security group to use for creating an ec2 instance
  ec2_group:
    name: '{{ resource_prefix }}_setup'
    description: 'created by Ansible integration tests'
    state: present
    vpc_id: '{{ setup_vpc.vpc.id }}'
    <<: *aws_connection_info
  register: setup_sg

- set_fact:
    sg_id: '{{ setup_sg.group_id }}'

@ -1,39 +0,0 @@
# Shared teardown tasks: best-effort removal of the security group, subnet,
# and VPC created by setup.yml. Deletion order matters (SG before subnet
# before VPC); each step ignores errors so cleanup continues on failure.
- name: set connection information for all tasks
  set_fact:
    aws_connection_info: &aws_connection_info
      aws_access_key: '{{ aws_access_key }}'
      aws_secret_key: '{{ aws_secret_key }}'
      security_token: '{{ security_token }}'
      region: '{{ aws_region }}'
  no_log: yes

- name: remove setup security group
  ec2_group:
    name: '{{ resource_prefix }}_setup'
    description: 'created by Ansible integration tests'
    state: absent
    vpc_id: '{{ vpc_id }}'
    <<: *aws_connection_info
  ignore_errors: yes

- name: remove setup subnet
  ec2_vpc_subnet:
    az: '{{ aws_region }}a'
    tags: '{{ resource_prefix }}_setup'
    vpc_id: '{{ vpc_id }}'
    cidr: 10.10.0.0/24
    state: absent
    resource_tags:
      Name: '{{ resource_prefix }}_setup'
    <<: *aws_connection_info
  ignore_errors: yes

- name: remove setup VPC
  ec2_vpc_net:
    cidr_block: 10.10.0.0/24
    state: absent
    name: '{{ resource_prefix }}_setup'
    resource_tags:
      Name: '{{ resource_prefix }}_setup'
    <<: *aws_connection_info
  ignore_errors: yes

@ -1,9 +0,0 @@
---
# Verifies that an invalid/empty aws_ec2 inventory config contributes no
# hosts: the aws_ec2 group must not exist at all.
- hosts: 127.0.0.1
  connection: local
  gather_facts: no
  tasks:
    - name: assert inventory was not populated by aws_ec2 inventory plugin
      assert:
        that:
          - "'aws_ec2' not in groups"

@ -1,18 +0,0 @@
---
# Run after populate_cache.yml: the initial inventory read must come from
# the (stale) cache and still show one host; refresh_inventory must bypass
# the cache, update it, and show the group as empty again.
- hosts: 127.0.0.1
  connection: local
  gather_facts: no
  tasks:
    - name: assert cache was used to populate inventory
      assert:
        that:
          - "'aws_ec2' in groups"
          - "groups.aws_ec2 | length == 1"

    - meta: refresh_inventory

    - name: assert refresh_inventory updated the cache
      assert:
        that:
          - "'aws_ec2' in groups"
          - "not groups.aws_ec2"

@ -1,91 +0,0 @@
---
# End-to-end aws_ec2 inventory population test: empty group after setup,
# one host after creating an instance + refresh, empty again after the
# instance is terminated + refresh.
- hosts: 127.0.0.1
  connection: local
  gather_facts: no
  environment: "{{ ansible_test.environment }}"
  tasks:
    - block:
        # Create VPC, subnet, security group, and find image_id to create instance
        - include_tasks: setup.yml

        - name: assert group was populated with inventory but is empty
          assert:
            that:
              - "'aws_ec2' in groups"
              - "not groups.aws_ec2"

        # Create new host, refresh inventory, remove host, refresh inventory
        - name: set connection information for all tasks
          set_fact:
            aws_connection_info: &aws_connection_info
              aws_access_key: '{{ aws_access_key }}'
              aws_secret_key: '{{ aws_secret_key }}'
              security_token: '{{ security_token }}'
              region: '{{ aws_region }}'
          no_log: yes

        - name: create a new host
          ec2:
            image: '{{ image_id }}'
            exact_count: 1
            count_tag:
              Name: '{{ resource_prefix }}'
            instance_tags:
              Name: '{{ resource_prefix }}'
            instance_type: t2.micro
            wait: yes
            group_id: '{{ sg_id }}'
            vpc_subnet_id: '{{ subnet_id }}'
            <<: *aws_connection_info
          register: setup_instance

        - meta: refresh_inventory

        - name: assert group was populated with inventory and is no longer empty
          assert:
            that:
              - "'aws_ec2' in groups"
              - "groups.aws_ec2 | length == 1"
              - "groups.aws_ec2.0 == '{{ resource_prefix }}'"

        - name: remove setup ec2 instance
          ec2:
            instance_type: t2.micro
            instance_ids: '{{ setup_instance.instance_ids }}'
            state: absent
            wait: yes
            instance_tags:
              Name: '{{ resource_prefix }}'
            group_id: '{{ sg_id }}'
            vpc_subnet_id: '{{ subnet_id }}'
            <<: *aws_connection_info

        - meta: refresh_inventory

        - name: assert group was populated with inventory but is empty
          assert:
            that:
              - "'aws_ec2' in groups"
              - "not groups.aws_ec2"

      always:
        # Best-effort cleanup of the instance and supporting resources.
        - name: remove setup ec2 instance
          ec2:
            instance_type: t2.micro
            instance_ids: '{{ setup_instance.instance_ids }}'
            state: absent
            wait: yes
            instance_tags:
              Name: '{{ resource_prefix }}'
            group_id: '{{ sg_id }}'
            vpc_subnet_id: '{{ subnet_id }}'
            <<: *aws_connection_info
          ignore_errors: yes
          when: setup_instance is defined

        - include_tasks: tear_down.yml

@ -1,79 +0,0 @@
---
# Tests the constructed features of the aws_ec2 inventory plugin:
# keyed_groups (by security group, tags, architecture), a constructed
# `groups` entry, and a composed host variable.
- hosts: 127.0.0.1
  connection: local
  gather_facts: no
  environment: "{{ ansible_test.environment }}"
  tasks:
    - block:
        # Create VPC, subnet, security group, and find image_id to create instance
        - include_tasks: setup.yml

        # Create new host, refresh inventory
        - name: set connection information for all tasks
          set_fact:
            aws_connection_info: &aws_connection_info
              aws_access_key: '{{ aws_access_key }}'
              aws_secret_key: '{{ aws_secret_key }}'
              security_token: '{{ security_token }}'
              region: '{{ aws_region }}'
          no_log: yes

        - name: create a new host
          ec2:
            image: '{{ image_id }}'
            exact_count: 1
            count_tag:
              Name: '{{ resource_prefix }}'
            instance_tags:
              Name: '{{ resource_prefix }}'
              tag1: value1
              tag2: value2
            instance_type: t2.micro
            wait: yes
            group_id: '{{ sg_id }}'
            vpc_subnet_id: '{{ subnet_id }}'
            <<: *aws_connection_info
          register: setup_instance

        - meta: refresh_inventory

        - name: register the keyed sg group name
          set_fact:
            sg_group_name: "security_groups_{{ sg_id | replace('-', '_') }}"

        - name: register one of the keyed tag groups name
          set_fact:
            tag_group_name: "tag_Name_{{ resource_prefix | replace('-', '_') }}"

        - name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars
          assert:
            that:
              # There are 9 groups: all, ungrouped, aws_ec2, sg keyed group, 3 tag keyed group (one per tag), arch keyed group, constructed group
              - "groups | length == 9"
              - "groups[tag_group_name] | length == 1"
              - "groups[sg_group_name] | length == 1"
              - "groups.arch_x86_64 | length == 1"
              - "groups.tag_with_name_key | length == 1"
              - vars.hostvars[groups.aws_ec2.0]['test_compose_var_sum'] == 'value1value2'

      always:
        # Best-effort cleanup of the instance and supporting resources.
        - name: remove setup ec2 instance
          ec2:
            instance_type: t2.micro
            instance_ids: '{{ setup_instance.instance_ids }}'
            state: absent
            wait: yes
            instance_tags:
              Name: '{{ resource_prefix }}'
            group_id: "{{ sg_id }}"
            vpc_subnet_id: "{{ subnet_id }}"
            <<: *aws_connection_info
          ignore_errors: yes
          when: setup_instance is defined

        - include_tasks: tear_down.yml

@ -1,74 +0,0 @@
# Tasks file verifying `meta: refresh_inventory` picks up EC2 instance
# creation and termination. Assumes the caller provides `images`,
# `setup_sg` and `setup_subnet` (registered by its own setup tasks).
- name: test updating inventory
  block:
    - name: assert group was populated with inventory but is empty
      assert:
        that:
          - "'aws_ec2' in groups"
          - "not groups.aws_ec2"

    - name: set connection information for all tasks
      set_fact:
        aws_connection_info: &aws_connection_info
          aws_access_key: "{{ aws_access_key }}"
          aws_secret_key: "{{ aws_secret_key }}"
          security_token: "{{ security_token }}"
          region: "{{ aws_region }}"
      no_log: yes

    - name: create a new host
      ec2:
        image: "{{ images[aws_region] }}"
        exact_count: 1
        count_tag:
          Name: '{{ resource_prefix }}'
        instance_tags:
          Name: '{{ resource_prefix }}'
        instance_type: t2.micro
        wait: yes
        group_id: '{{ setup_sg.group_id }}'
        vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
        <<: *aws_connection_info
      register: setup_instance

    - meta: refresh_inventory

    - name: assert group was populated with inventory and is no longer empty
      assert:
        that:
          - "'aws_ec2' in groups"
          - "groups.aws_ec2 | length == 1"
          - "groups.aws_ec2.0 == '{{ resource_prefix }}'"

    - name: remove setup ec2 instance
      ec2:
        instance_type: t2.micro
        instance_ids: '{{ setup_instance.instance_ids }}'
        state: absent
        wait: yes
        instance_tags:
          Name: '{{ resource_prefix }}'
        group_id: '{{ setup_sg.group_id }}'
        vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
        <<: *aws_connection_info

    - meta: refresh_inventory

    - name: assert group was populated with inventory but is empty
      assert:
        that:
          - "'aws_ec2' in groups"
          - "not groups.aws_ec2"

  always:
    # Best-effort cleanup in case an assertion failed mid-test.
    - name: remove setup ec2 instance
      ec2:
        instance_type: t2.micro
        instance_ids: '{{ setup_instance.instance_ids }}'
        state: absent
        wait: yes
        instance_tags:
          Name: '{{ resource_prefix }}'
        group_id: '{{ setup_sg.group_id }}'
        vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
        <<: *aws_connection_info
      ignore_errors: yes

@ -1,39 +0,0 @@
#!/usr/bin/env bash
# Integration test driver for the aws_ec2 inventory plugin.
# Exercises: invalid config, empty config, basic population, caching,
# and constructed (keyed_groups/compose) features.
set -eux
source virtualenv.sh
# boto3/boto are required by the aws_ec2 inventory plugin and the ec2_* modules
python -m pip install boto3 boto
# ensure test config is empty
ansible-playbook playbooks/empty_inventory_config.yml "$@"
# restrict inventory plugins so only aws_ec2 can claim the config file
export ANSIBLE_INVENTORY_ENABLED=aws_ec2
# test with default inventory file
ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
export ANSIBLE_INVENTORY=test.aws_ec2.yml
# test empty inventory config
ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
# generate inventory config and test using it
ansible-playbook playbooks/create_inventory_config.yml "$@"
ansible-playbook playbooks/test_populating_inventory.yml "$@"
# generate inventory config with caching and test using it
ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml'" "$@"
ansible-playbook playbooks/populate_cache.yml "$@"
ansible-playbook playbooks/test_inventory_cache.yml "$@"
# remove inventory cache
rm -r aws_ec2_cache_dir/
# generate inventory config with constructed features and test using it
ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.yml'" "$@"
ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
# cleanup inventory config
ansible-playbook playbooks/empty_inventory_config.yml "$@"

@ -1,12 +0,0 @@
# Basic aws_ec2 inventory config template: one region, filtered to the
# test instance by its Name tag; hostname precedence is tag:Name then DNS.
plugin: aws_ec2
aws_access_key_id: '{{ aws_access_key }}'
aws_secret_access_key: '{{ aws_secret_key }}'
aws_security_token: '{{ security_token }}'
regions:
  - '{{ aws_region }}'
filters:
  tag:Name:
    - '{{ resource_prefix }}'
hostnames:
  - tag:Name
  - dns-name

@ -1,12 +0,0 @@
# aws_ec2 inventory config template with caching enabled via the jsonfile
# cache plugin (cache dir is removed by runme.sh between test phases).
plugin: aws_ec2
cache: True
cache_plugin: jsonfile
cache_connection: aws_ec2_cache_dir
aws_access_key_id: '{{ aws_access_key }}'
aws_secret_access_key: '{{ aws_secret_key }}'
aws_security_token: '{{ security_token }}'
regions:
  - '{{ aws_region }}'
filters:
  tag:Name:
    - '{{ resource_prefix }}'

@ -1,20 +0,0 @@
# aws_ec2 inventory config template exercising constructed features:
# keyed_groups (security group, tags, architecture), a composed variable,
# and a conditionally-constructed group.
plugin: aws_ec2
aws_access_key_id: '{{ aws_access_key }}'
aws_secret_access_key: '{{ aws_secret_key }}'
aws_security_token: '{{ security_token }}'
regions:
  - '{{ aws_region }}'
filters:
  tag:Name:
    - '{{ resource_prefix }}'
keyed_groups:
  - key: 'security_groups|json_query("[].group_id")'
    prefix: 'security_groups'
  - key: 'tags'
    prefix: 'tag'
  - prefix: 'arch'
    key: "architecture"
compose:
  test_compose_var_sum: tags.tag1 + tags.tag2
groups:
  tag_with_name_key: "'Name' in (tags | list)"

@ -170,13 +170,7 @@ test/integration/targets/win_script/files/test_script_with_splatting.ps1 pslint:
test/lib/ansible_test/_data/requirements/sanity.pslint.ps1 pslint:PSCustomUseLiteralPath # Uses wildcards on purpose
test/lib/ansible_test/_util/target/setup/ConfigureRemotingForAnsible.ps1 pslint:PSCustomUseLiteralPath
test/lib/ansible_test/_util/target/setup/requirements.py replace-urlopen
test/support/integration/plugins/inventory/aws_ec2.py pylint:use-a-generator
test/support/integration/plugins/modules/ec2_group.py pylint:use-a-generator
test/support/integration/plugins/modules/timezone.py pylint:disallowed-name
test/support/integration/plugins/module_utils/aws/core.py pylint:property-with-parameters
test/support/integration/plugins/module_utils/cloud.py future-import-boilerplate
test/support/integration/plugins/module_utils/cloud.py metaclass-boilerplate
test/support/integration/plugins/module_utils/cloud.py pylint:isinstance-second-argument-not-valid-type
test/support/integration/plugins/module_utils/compat/ipaddress.py future-import-boilerplate
test/support/integration/plugins/module_utils/compat/ipaddress.py metaclass-boilerplate
test/support/integration/plugins/module_utils/compat/ipaddress.py no-unicode-literals
@ -304,4 +298,3 @@ lib/ansible/module_utils/compat/_selectors2.py mypy-3.9:assignment # vendored c
lib/ansible/module_utils/compat/_selectors2.py mypy-3.10:assignment # vendored code
lib/ansible/module_utils/compat/_selectors2.py mypy-3.11:assignment # vendored code
lib/ansible/module_utils/compat/_selectors2.py mypy-2.7:attr-defined # vendored code
test/support/integration/plugins/modules/ec2.py pylint:ansible-deprecated-version

@ -1,760 +0,0 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: aws_ec2
plugin_type: inventory
short_description: EC2 inventory source
requirements:
- boto3
- botocore
extends_documentation_fragment:
- inventory_cache
- constructed
description:
- Get inventory hosts from Amazon Web Services EC2.
- Uses a YAML configuration file that ends with C(aws_ec2.(yml|yaml)).
notes:
- If no credentials are provided and the control node has an associated IAM instance profile then the
role will be used for authentication.
author:
- Sloane Hertel (@s-hertel)
options:
aws_profile:
description: The AWS profile
type: str
aliases: [ boto_profile ]
env:
- name: AWS_DEFAULT_PROFILE
- name: AWS_PROFILE
aws_access_key:
description: The AWS access key to use.
type: str
aliases: [ aws_access_key_id ]
env:
- name: EC2_ACCESS_KEY
- name: AWS_ACCESS_KEY
- name: AWS_ACCESS_KEY_ID
aws_secret_key:
description: The AWS secret key that corresponds to the access key.
type: str
aliases: [ aws_secret_access_key ]
env:
- name: EC2_SECRET_KEY
- name: AWS_SECRET_KEY
- name: AWS_SECRET_ACCESS_KEY
aws_security_token:
description: The AWS security token if using temporary access and secret keys.
type: str
env:
- name: EC2_SECURITY_TOKEN
- name: AWS_SESSION_TOKEN
- name: AWS_SECURITY_TOKEN
plugin:
description: Token that ensures this is a source file for the plugin.
required: True
choices: ['aws_ec2']
iam_role_arn:
description: The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
credentials with enough privilege to perform the AssumeRole action.
version_added: '2.9'
regions:
description:
- A list of regions in which to describe EC2 instances.
- If empty (the default) default this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
type: list
default: []
hostnames:
description:
- A list in order of precedence for hostname variables.
- You can use the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
- To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
type: list
default: []
filters:
description:
- A dictionary of filter value pairs.
- Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
type: dict
default: {}
include_extra_api_calls:
description:
- Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
- Spot instances may be persistent and instances may have associated events.
type: bool
default: False
version_added: '2.8'
strict_permissions:
description:
- By default if a 403 (Forbidden) error code is encountered this plugin will fail.
- You can set this option to False in the inventory config file which will allow 403 errors to be gracefully skipped.
type: bool
default: True
use_contrib_script_compatible_sanitization:
description:
- By default this plugin is using a general group name sanitization to create safe and usable group names for use in Ansible.
This option allows you to override that, in efforts to allow migration from the old inventory script and
matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
To replicate behavior of ``replace_dash_in_groups = True`` with constructed groups,
you will need to replace hyphens with underscores via the regex_replace filter for those entries.
- For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
otherwise the core engine will just use the standard sanitization on top.
- This is not the default as such names break certain functionality as not all characters are valid Python identifiers
which group names end up being used as.
type: bool
default: False
version_added: '2.8'
'''
EXAMPLES = '''
# Minimal example using environment vars or instance role credentials
# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
plugin: aws_ec2
regions:
- us-east-1
# Example using filters, ignoring permission errors, and specifying the hostname precedence
plugin: aws_ec2
boto_profile: aws_profile
# Populate inventory with instances in these regions
regions:
- us-east-1
- us-east-2
filters:
# All instances with their `Environment` tag set to `dev`
tag:Environment: dev
# All dev and QA hosts
tag:Environment:
- dev
- qa
instance.group-id: sg-xxxxxxxx
# Ignores 403 errors rather than failing
strict_permissions: False
# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
# inventory_hostname use compose (see example below).
hostnames:
- tag:Name=Tag1,Name=Tag2 # Return specific hosts only
- tag:CustomDNSName
- dns-name
- private-ip-address
# Example using constructed features to create groups and set ansible_host
plugin: aws_ec2
regions:
- us-east-1
- us-west-1
# keyed_groups may be used to create custom groups
strict: False
keyed_groups:
# Add e.g. x86_64 hosts to an arch_x86_64 group
- prefix: arch
key: 'architecture'
# Add hosts to tag_Name_Value groups for each Name/Value tag pair
- prefix: tag
key: tags
# Add hosts to e.g. instance_type_z3_tiny
- prefix: instance_type
key: instance_type
# Create security_groups_sg_abcd1234 group for each SG
- key: 'security_groups|json_query("[].group_id")'
prefix: 'security_groups'
# Create a group for each value of the Application tag
- key: tags.Application
separator: ''
# Create a group per region e.g. aws_region_us_east_2
- key: placement.region
prefix: aws_region
# Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project"
- key: tags['Role']
prefix: foo
parent_group: "project"
# Set individual variables with compose
compose:
# Use the private IP address to connect to the host
# (note: this does not modify inventory_hostname, which is set via I(hostnames))
ansible_host: private_ip_address
'''
import re
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.utils.display import Display
from ansible.module_utils.six import string_types
try:
import boto3
import botocore
except ImportError:
raise AnsibleError('The ec2 dynamic inventory plugin requires boto3 and botocore.')
display = Display()
# The mappings give an array of keys to get from the filter name to the value
# returned by boto3's EC2 describe_instances method.
instance_meta_filter_to_boto_attr = {
'group-id': ('Groups', 'GroupId'),
'group-name': ('Groups', 'GroupName'),
'network-interface.attachment.instance-owner-id': ('OwnerId',),
'owner-id': ('OwnerId',),
'requester-id': ('RequesterId',),
'reservation-id': ('ReservationId',),
}
instance_data_filter_to_boto_attr = {
'affinity': ('Placement', 'Affinity'),
'architecture': ('Architecture',),
'availability-zone': ('Placement', 'AvailabilityZone'),
'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
'client-token': ('ClientToken',),
'dns-name': ('PublicDnsName',),
'host-id': ('Placement', 'HostId'),
'hypervisor': ('Hypervisor',),
'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
'image-id': ('ImageId',),
'instance-id': ('InstanceId',),
'instance-lifecycle': ('InstanceLifecycle',),
'instance-state-code': ('State', 'Code'),
'instance-state-name': ('State', 'Name'),
'instance-type': ('InstanceType',),
'instance.group-id': ('SecurityGroups', 'GroupId'),
'instance.group-name': ('SecurityGroups', 'GroupName'),
'ip-address': ('PublicIpAddress',),
'kernel-id': ('KernelId',),
'key-name': ('KeyName',),
'launch-index': ('AmiLaunchIndex',),
'launch-time': ('LaunchTime',),
'monitoring-state': ('Monitoring', 'State'),
'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
'network-interface.attachment.instance-id': ('InstanceId',),
'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
'network-interface.description': ('NetworkInterfaces', 'Description'),
'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
# 'network-interface.requester-id': (),
'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
'network-interface.status': ('NetworkInterfaces', 'Status'),
'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
'placement-group-name': ('Placement', 'GroupName'),
'platform': ('Platform',),
'private-dns-name': ('PrivateDnsName',),
'private-ip-address': ('PrivateIpAddress',),
'product-code': ('ProductCodes', 'ProductCodeId'),
'product-code.type': ('ProductCodes', 'ProductCodeType'),
'ramdisk-id': ('RamdiskId',),
'reason': ('StateTransitionReason',),
'root-device-name': ('RootDeviceName',),
'root-device-type': ('RootDeviceType',),
'source-dest-check': ('SourceDestCheck',),
'spot-instance-request-id': ('SpotInstanceRequestId',),
'state-reason-code': ('StateReason', 'Code'),
'state-reason-message': ('StateReason', 'Message'),
'subnet-id': ('SubnetId',),
'tag': ('Tags',),
'tag-key': ('Tags',),
'tag-value': ('Tags',),
'tenancy': ('Placement', 'Tenancy'),
'virtualization-type': ('VirtualizationType',),
'vpc-id': ('VpcId',),
}
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = 'aws_ec2'
def __init__(self):
    """Initialize the plugin with its group prefix and empty credential slots."""
    super(InventoryModule, self).__init__()

    self.group_prefix = 'aws_ec2_'

    # credentials (filled in later from plugin options)
    self.boto_profile = None
    self.aws_secret_access_key = None
    self.aws_access_key_id = None
    self.aws_security_token = None
    self.iam_role_arn = None
def _compile_values(self, obj, attr):
    '''Recursively extract ``attr`` from a (possibly nested) boto3 structure.

    :param obj: A list or dict of instance attributes
    :param attr: A key
    :return The value(s) found via the attr; a single-element list/tuple
            result is unwrapped to its sole element.
    '''
    if obj is None:
        return

    temp_obj = []

    if isinstance(obj, list) or isinstance(obj, tuple):
        # Recurse into each element; falsy results are dropped.
        for each in obj:
            value = self._compile_values(each, attr)
            if value:
                temp_obj.append(value)
    else:
        temp_obj = obj.get(attr)

    has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)])
    if has_indexes and len(temp_obj) == 1:
        return temp_obj[0]

    return temp_obj
def _get_boto_attr_chain(self, filter_name, instance):
    '''Resolve a filter name to its value by walking the mapped attribute chain.

    :param filter_name: The filter
    :param instance: instance dict returned by boto3 ec2 describe_instances()
    :raises AnsibleError: if ``filter_name`` is not a recognized filter
    '''
    allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
    if filter_name not in allowed_filters:
        raise AnsibleError("Invalid filter '%s' provided; filter must be one of %s." % (filter_name,
                                                                                        allowed_filters))
    if filter_name in instance_data_filter_to_boto_attr:
        boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
    else:
        boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]

    # Walk the attribute chain, e.g. ('Placement', 'AvailabilityZone').
    instance_value = instance
    for attribute in boto_attr_list:
        instance_value = self._compile_values(instance_value, attribute)

    return instance_value
def _get_credentials(self):
    '''Collect the non-empty credential attributes into boto client kwargs.

    :return A dictionary of boto client credentials
    '''
    boto_params = {}
    for credential in (('aws_access_key_id', self.aws_access_key_id),
                       ('aws_secret_access_key', self.aws_secret_access_key),
                       ('aws_session_token', self.aws_security_token)):
        if credential[1]:
            boto_params[credential[0]] = credential[1]

    return boto_params
def _get_connection(self, credentials, region='us-east-1'):
    '''Create a boto3 EC2 client for ``region``.

    Falls back to profile-only authentication when explicit credentials are
    rejected and a boto profile is configured.

    :raises AnsibleError: when no usable credentials can be found
    '''
    try:
        connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
    except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
        if self.boto_profile:
            try:
                # Retry with the profile alone (no explicit credentials).
                connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
            except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
                raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
        else:
            raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
    return connection
def _boto3_assume_role(self, credentials, region):
    """
    Assume an IAM role passed by iam_role_arn parameter

    :return: a dict containing the credentials of the assumed role
    :raises AnsibleError: when the STS AssumeRole call fails
    """
    iam_role_arn = self.iam_role_arn

    try:
        sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
        sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
        return dict(
            aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
            aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
            aws_session_token=sts_session['Credentials']['SessionToken']
        )
    except botocore.exceptions.ClientError as e:
        raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
def _boto3_conn(self, regions):
    '''
    :param regions: A list of regions to create a boto3 client

    Generator that yields a boto3 client and the region
    '''
    credentials = self._get_credentials()
    iam_role_arn = self.iam_role_arn

    if not regions:
        try:
            # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
            client = self._get_connection(credentials)
            resp = client.describe_regions()
            regions = [x['RegionName'] for x in resp.get('Regions', [])]
        except botocore.exceptions.NoRegionError:
            # above seems to fail depending on boto3 version, ignore and lets try something else
            pass

    # fallback to local list hardcoded in boto3 if still no regions
    if not regions:
        session = boto3.Session()
        regions = session.get_available_regions('ec2')

    # I give up, now you MUST give me regions
    if not regions:
        raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')

    for region in regions:
        connection = self._get_connection(credentials, region)
        try:
            if iam_role_arn is not None:
                assumed_credentials = self._boto3_assume_role(credentials, region)
            else:
                assumed_credentials = credentials
            connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
        except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
            if self.boto_profile:
                try:
                    # Retry with the profile alone (no explicit credentials).
                    connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
                except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
                    raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
            else:
                raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
        yield connection, region
def _get_instances_by_region(self, regions, filters, strict_permissions):
    '''
    Describe EC2 instances across one or more regions.

    :param regions: a list of regions in which to describe instances
    :param filters: a list of boto3 filter dictionaries
    :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
    :return A list of instance dictionaries, sorted by InstanceId
    '''
    all_instances = []
    for connection, region in self._boto3_conn(regions):
        try:
            # By default find non-terminated/terminating instances.
            # NOTE: this mutates the caller's filters list in place; the
            # membership check prevents appending the filter more than once.
            if not any([f['Name'] == 'instance-state-name' for f in filters]):
                filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
            paginator = connection.get_paginator('describe_instances')
            reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
            instances = []
            for r in reservations:
                new_instances = r['Instances']
                for instance in new_instances:
                    # Merge reservation-level metadata (OwnerId etc.) into each instance
                    instance.update(self._get_reservation_details(r))
                    if self.get_option('include_extra_api_calls'):
                        # Optional extra API calls: scheduled events + spot persistence
                        instance.update(self._get_event_set_and_persistence(connection, instance['InstanceId'], instance.get('SpotInstanceRequestId')))
                instances.extend(new_instances)
        except botocore.exceptions.ClientError as e:
            # A 403 is tolerated (region skipped) unless strict_permissions is set
            if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
                instances = []
            else:
                raise AnsibleError("Failed to describe instances: %s" % to_native(e))
        except botocore.exceptions.BotoCoreError as e:
            raise AnsibleError("Failed to describe instances: %s" % to_native(e))
        all_instances.extend(instances)
    return sorted(all_instances, key=lambda x: x['InstanceId'])
def _get_reservation_details(self, reservation):
return {
'OwnerId': reservation['OwnerId'],
'RequesterId': reservation.get('RequesterId', ''),
'ReservationId': reservation['ReservationId']
}
def _get_event_set_and_persistence(self, connection, instance_id, spot_instance):
    '''
    Fetch optional per-instance details via extra API calls.

    :param connection: a boto3 EC2 client
    :param instance_id: the instance to query
    :param spot_instance: the instance's SpotInstanceRequestId, or None/falsy
    :return dict with 'Events' (scheduled events, or '' when unavailable) and
        'Persistent' (True when the spot request type is 'persistent')
    '''
    host_vars = {'Events': '', 'Persistent': False}
    try:
        kwargs = {'InstanceIds': [instance_id]}
        host_vars['Events'] = connection.describe_instance_status(**kwargs)['InstanceStatuses'][0].get('Events', '')
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        # Permission errors are silently ignored unless strict_permissions is on
        if not self.get_option('strict_permissions'):
            pass
        else:
            raise AnsibleError("Failed to describe instance status: %s" % to_native(e))
    if spot_instance:
        try:
            kwargs = {'SpotInstanceRequestIds': [spot_instance]}
            host_vars['Persistent'] = bool(
                connection.describe_spot_instance_requests(**kwargs)['SpotInstanceRequests'][0].get('Type') == 'persistent'
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            # Same best-effort policy as above
            if not self.get_option('strict_permissions'):
                pass
            else:
                raise AnsibleError("Failed to describe spot instance requests: %s" % to_native(e))
    return host_vars
def _get_tag_hostname(self, preference, instance):
    """Derive a hostname for *instance* from its tags per a 'tag:' preference.

    :param preference: 'tag:<name>', 'tag:<name>=<value>', or a comma-separated
        list of such specs after the 'tag:' prefix
    :param instance: a boto3 instance dict (its 'Tags' list is consulted)
    :return: hostname text, or None if no spec matched
    """
    tag_hostnames = preference.split('tag:', 1)[1]
    if ',' in tag_hostnames:
        tag_hostnames = tag_hostnames.split(',')
    else:
        tag_hostnames = [tag_hostnames]
    tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
    for v in tag_hostnames:
        if '=' in v:
            # maxsplit=1: a tag value containing '=' (e.g. Name=a=b) must not
            # raise ValueError from tuple unpacking; everything after the first
            # '=' is the value.
            tag_name, tag_value = v.split('=', 1)
            if tags.get(tag_name) == tag_value:
                return to_text(tag_name) + "_" + to_text(tag_value)
        else:
            tag_value = tags.get(v)
            if tag_value:
                return to_text(tag_value)
    return None
def _get_hostname(self, instance, hostnames):
    '''
    :param instance: an instance dict returned by boto3 ec2 describe_instances()
    :param hostnames: a list of hostname destination variables in order of preference
    :return the preferred identifier for the host, or None (implicitly) when
        no preference yields a value
    '''
    if not hostnames:
        hostnames = ['dns-name', 'private-dns-name']
    hostname = None
    for preference in hostnames:
        # NOTE(review): substring check — any preference merely *containing*
        # 'tag' (not just ones starting with it) takes this branch and then
        # errors below; confirm whether startswith was intended.
        if 'tag' in preference:
            if not preference.startswith('tag:'):
                raise AnsibleError("To name a host by tags name_value, use 'tag:name=value'.")
            hostname = self._get_tag_hostname(preference, instance)
        else:
            hostname = self._get_boto_attr_chain(preference, instance)
        # First preference that produces a truthy value wins
        if hostname:
            break
    if hostname:
        # ':' is not a valid group-name character; sanitize such hostnames
        if ':' in to_text(hostname):
            return self._sanitize_group_name((to_text(hostname)))
        else:
            return to_text(hostname)
def _query(self, regions, filters, strict_permissions):
    """Fetch all matching EC2 instances, wrapped under the top-level group.

    :param regions: a list of regions to query
    :param filters: a list of boto3 filter dictionaries
    :param strict_permissions: whether 403 error codes fail (True) or are ignored
    :return: {'aws_ec2': [instance dicts]}
    """
    instances = self._get_instances_by_region(regions, filters, strict_permissions)
    return {'aws_ec2': instances}
def _populate(self, groups, hostnames):
    """Create one inventory group per key in *groups* and attach its hosts.

    Note: add_group() may return a transformed (sanitized) group name; the
    returned name is used to index *groups* again, matching the original
    behavior exactly.
    """
    for requested_name in groups:
        actual_name = self.inventory.add_group(requested_name)
        self._add_hosts(hosts=groups[actual_name], group=actual_name, hostnames=hostnames)
        self.inventory.add_child('all', actual_name)
def _add_hosts(self, hosts, group, hostnames):
    '''
    :param hosts: a list of hosts to be added to a group
    :param group: the name of the group to which the hosts belong
    :param hostnames: a list of hostname destination variables in order of preference
    '''
    for host in hosts:
        hostname = self._get_hostname(host, hostnames)
        # Snake-case all keys except the Tags list, which is flattened below
        host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
        host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
        # Allow easier grouping by region: strip the trailing AZ letter
        # (e.g. 'us-east-1a' -> 'us-east-1')
        host['placement']['region'] = host['placement']['availability_zone'][:-1]
        # Hosts with no resolvable hostname are skipped entirely
        if not hostname:
            continue
        self.inventory.add_host(hostname, group=group)
        for hostvar, hostval in host.items():
            self.inventory.set_variable(hostname, hostvar, hostval)
        # Use constructed if applicable
        strict = self.get_option('strict')
        # Composed variables
        self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
        # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
        self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
        # Create groups based on variable values and add the corresponding hosts to it
        self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
def _set_credentials(self):
    '''
    Resolve AWS credentials from the inventory options and, failing that,
    from the botocore credential chain (env vars, shared config, etc.).

    :raises AnsibleError: when neither a profile nor a usable key pair
        could be found.
    '''
    self.boto_profile = self.get_option('aws_profile')
    self.aws_access_key_id = self.get_option('aws_access_key')
    self.aws_secret_access_key = self.get_option('aws_secret_key')
    self.aws_security_token = self.get_option('aws_security_token')
    self.iam_role_arn = self.get_option('iam_role_arn')
    if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
        # Fall back to botocore's standard credential resolution chain
        session = botocore.session.get_session()
        try:
            credentials = session.get_credentials().get_frozen_credentials()
        except AttributeError:
            # get_credentials() returned None — no credentials found
            pass
        else:
            self.aws_access_key_id = credentials.access_key
            self.aws_secret_access_key = credentials.secret_key
            self.aws_security_token = credentials.token
    if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
        raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
                           "inventory configuration file or set them as environment variables.")
def verify_file(self, path):
    """Accept only inventory config files named *aws_ec2.yml / *aws_ec2.yaml.

    :param path: the path to the candidate inventory config file
    :return: True when the base check passes and the filename matches
    """
    base_ok = super(InventoryModule, self).verify_file(path)
    if base_ok and path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')):
        return True
    # Debug message is emitted for every rejection, matching original behavior
    display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'")
    return False
def parse(self, inventory, loader, path, cache=True):
    # Entry point: read config, resolve credentials, query (or load cached)
    # instances, and populate the inventory.
    super(InventoryModule, self).parse(inventory, loader, path)
    self._read_config_data(path)
    if self.get_option('use_contrib_script_compatible_sanitization'):
        # Swap in the legacy ec2.py-compatible group-name sanitizer
        self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
    self._set_credentials()
    # get user specifications
    regions = self.get_option('regions')
    filters = ansible_dict_to_boto3_filter_list(self.get_option('filters'))
    hostnames = self.get_option('hostnames')
    strict_permissions = self.get_option('strict_permissions')
    cache_key = self.get_cache_key(path)
    # false when refresh_cache or --flush-cache is used
    if cache:
        # get the user-specified directive
        cache = self.get_option('cache')
    # Generate inventory
    cache_needs_update = False
    if cache:
        try:
            results = self._cache[cache_key]
        except KeyError:
            # if cache expires or cache file doesn't exist
            cache_needs_update = True
    if not cache or cache_needs_update:
        results = self._query(regions, filters, strict_permissions)
    self._populate(results, hostnames)
    # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
    # when the user is using caching, update the cached inventory
    if cache_needs_update or (not cache and self.get_option('cache')):
        self._cache[cache_key] = results
@staticmethod
def _legacy_script_compatible_group_sanitization(name):
# note that while this mirrors what the script used to do, it has many issues with unicode and usability in python
regex = re.compile(r"[^A-Za-z0-9\_\-]")
return regex.sub('_', name)
def ansible_dict_to_boto3_filter_list(filters_dict):
    """ Convert an Ansible dict of filters to the list-of-dicts form boto3 expects

    Args:
        filters_dict (dict): Dict of AWS filters; each value is either a single
            string or a list of values.

    Basic Usage:
        >>> ansible_dict_to_boto3_filter_list({'some-aws-id': 'i-01234567'})
        [{'Name': 'some-aws-id', 'Values': ['i-01234567']}]

    Returns:
        List: one {'Name': ..., 'Values': [...]} dict per input key; a bare
        string value is wrapped in a single-element list.
    """
    boto3_filters = []
    for name, value in filters_dict.items():
        values = [value] if isinstance(value, string_types) else value
        boto3_filters.append({'Name': name, 'Values': values})
    return boto3_filters
def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
    """ Convert a boto3 list of resource tags to a flat dict of key:value pairs

    Args:
        tags_list (list): List of dicts representing AWS tags.
        tag_name_key_name (str): Key under which each tag's name is stored
            (boto3 doesn't always use "Key").
        tag_value_key_name (str): Key under which each tag's value is stored
            (boto3 doesn't always use "Value").

    Basic Usage:
        >>> boto3_tag_list_to_ansible_dict([{'Key': 'MyTagKey', 'Value': 'MyTagValue'}])
        {'MyTagKey': 'MyTagValue'}

    Returns:
        Dict: tag name -> tag value. An empty/None input yields {}.

    Raises:
        ValueError: when no known key/value naming scheme matches the first tag.
    """
    # When an explicit scheme is given, try only that; otherwise try the two
    # conventions boto3 uses, lowercase first (order matters for overlap).
    if tag_name_key_name and tag_value_key_name:
        candidates = {tag_name_key_name: tag_value_key_name}
    else:
        candidates = {'key': 'value', 'Key': 'Value'}
    if not tags_list:
        return {}
    first_tag = tags_list[0]
    for name_key, value_key in candidates.items():
        if name_key in first_tag and value_key in first_tag:
            return dict((tag[name_key], tag[value_key]) for tag in tags_list)
    raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(candidates), str(tags_list)))

@ -1,335 +0,0 @@
#
# Copyright 2017 Michael De La Rue | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""This module adds shared support for generic Amazon AWS modules
**This code is not yet ready for use in user modules. As of 2017**
**and through to 2018, the interface is likely to change**
**aggressively as the exact correct interface for ansible AWS modules**
**is identified. In particular, until this notice goes away or is**
**changed, methods may disappear from the interface. Please don't**
**publish modules using this except directly to the main Ansible**
**development repository.**
In order to use this module, include it as part of a custom
module as shown below.
from ansible.module_utils.aws import AnsibleAWSModule
module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean
mutually_exclusive=list1, required_together=list2)
The 'AnsibleAWSModule' module provides similar, but more restricted,
interfaces to the normal Ansible module. It also includes the
additional methods for connecting to AWS using the standard module arguments
m.resource('lambda') # - get an AWS connection as a boto3 resource.
or
m.client('sts') # - get an AWS connection as a boto3 client.
To make use of AWSRetry easier, it can now be wrapped around any call from a
module-created client. To add retries to a client, create a client:
m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
Any calls from that client can be made to use the decorator passed at call-time
using the `aws_retry` argument. By default, no retries are used.
ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
The call will be retried the specified number of times, so the calling functions
don't need to be wrapped in the backoff decorator.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import logging
import traceback
from functools import wraps
from ansible.module_utils.compat.version import LooseVersion
try:
from cStringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn
from ansible.module_utils.ec2 import get_aws_connection_info, get_aws_region
# We will also export HAS_BOTO3 so end user modules can use it.
__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code')
class AnsibleAWSModule(object):
    """An ansible module class for AWS modules

    AnsibleAWSModule provides a class for building modules which
    connect to Amazon Web Services.  The interface is currently more
    restricted than the basic module class with the aim that later the
    basic module class can be reduced.  If you find that any key
    feature is missing please contact the author/Ansible AWS team
    (available on #ansible-aws on IRC) to request the additional
    features needed.
    """
    # Settings consumed by this wrapper (popped from kwargs before they
    # reach the underlying AnsibleModule):
    #   default_args  - merge ec2_argument_spec() into the argument spec
    #   check_boto3   - fail early when boto3/botocore is missing
    #   auto_retry    - reserved; not read anywhere in this class
    #   module_class  - the underlying module implementation
    default_settings = {
        "default_args": True,
        "check_boto3": True,
        "auto_retry": True,
        "module_class": AnsibleModule
    }

    def __init__(self, **kwargs):
        # Split wrapper settings out of kwargs, falling back to the class defaults
        local_settings = {}
        for key in AnsibleAWSModule.default_settings:
            try:
                local_settings[key] = kwargs.pop(key)
            except KeyError:
                local_settings[key] = AnsibleAWSModule.default_settings[key]
        self.settings = local_settings

        if local_settings["default_args"]:
            # ec2_argument_spec contains the region so we use that; there's a patch coming which
            # will add it to aws_argument_spec so if that's accepted then later we should change
            # over
            argument_spec_full = ec2_argument_spec()
            try:
                argument_spec_full.update(kwargs["argument_spec"])
            except (TypeError, NameError):
                pass
            kwargs["argument_spec"] = argument_spec_full

        # NOTE(review): this reads the *class default* module_class, so a
        # caller-supplied module_class setting is collected into
        # local_settings but never used — confirm whether that is intended.
        self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)

        if local_settings["check_boto3"] and not HAS_BOTO3:
            self._module.fail_json(
                msg=missing_required_lib('botocore or boto3'))

        # Mirror commonly used attributes of the wrapped module
        self.check_mode = self._module.check_mode
        self._diff = self._module._diff
        self._name = self._module._name

        # Capture botocore endpoint logs in-memory so resource actions can be
        # reported in exit_json/fail_json when debug_botocore_endpoint_logs is set
        self._botocore_endpoint_log_stream = StringIO()
        self.logger = None
        if self.params.get('debug_botocore_endpoint_logs'):
            self.logger = logging.getLogger('botocore.endpoint')
            self.logger.setLevel(logging.DEBUG)
            self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream))

    @property
    def params(self):
        # Parameters of the wrapped AnsibleModule
        return self._module.params

    def _get_resource_action_list(self):
        # Parse the captured botocore endpoint log into a deduplicated list of
        # "host:Operation" strings.
        actions = []
        for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'):
            ln = ln.strip()
            if not ln:
                continue
            found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln)
            if found_operational_request:
                # [20:-1] strips the "OperationModel(name=" prefix and ")" suffix
                operation_request = found_operational_request.group(0)[20:-1]
                # [8:-1] strips "https://" and the trailing '.'
                # NOTE(review): this second search assumes every matched line
                # contains an https:// URL; a None here would raise — confirm.
                resource = re.search(r"https://.*?\.", ln).group(0)[8:-1]
                actions.append("{0}:{1}".format(resource, operation_request))
        return list(set(actions))

    def exit_json(self, *args, **kwargs):
        # Attach the observed AWS actions when endpoint debugging is enabled
        if self.params.get('debug_botocore_endpoint_logs'):
            kwargs['resource_actions'] = self._get_resource_action_list()
        return self._module.exit_json(*args, **kwargs)

    def fail_json(self, *args, **kwargs):
        # Attach the observed AWS actions when endpoint debugging is enabled
        if self.params.get('debug_botocore_endpoint_logs'):
            kwargs['resource_actions'] = self._get_resource_action_list()
        return self._module.fail_json(*args, **kwargs)

    # Thin pass-throughs to the wrapped AnsibleModule
    def debug(self, *args, **kwargs):
        return self._module.debug(*args, **kwargs)

    def warn(self, *args, **kwargs):
        return self._module.warn(*args, **kwargs)

    def deprecate(self, *args, **kwargs):
        return self._module.deprecate(*args, **kwargs)

    def boolean(self, *args, **kwargs):
        return self._module.boolean(*args, **kwargs)

    def md5(self, *args, **kwargs):
        return self._module.md5(*args, **kwargs)

    def client(self, service, retry_decorator=None):
        # Build a boto3 client for *service*; when a retry decorator is given,
        # wrap the client so calls can opt into retries via aws_retry=True.
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
        conn = boto3_conn(self, conn_type='client', resource=service,
                          region=region, endpoint=ec2_url, **aws_connect_kwargs)
        return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator)

    def resource(self, service):
        # Build a boto3 resource for *service*
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
        return boto3_conn(self, conn_type='resource', resource=service,
                          region=region, endpoint=ec2_url, **aws_connect_kwargs)

    @property
    def region(self, boto3=True):
        # NOTE(review): a property getter cannot receive arguments, so the
        # boto3 parameter here always takes its default — confirm intent.
        return get_aws_region(self, boto3)

    def fail_json_aws(self, exception, msg=None):
        """call fail_json with processed exception

        function for converting exceptions thrown by AWS SDK modules,
        botocore, boto3 and boto, into nice error messages.
        """
        last_traceback = traceback.format_exc()

        # to_native is trusted to handle exceptions that str() could
        # convert to text.
        try:
            except_msg = to_native(exception.message)
        except AttributeError:
            except_msg = to_native(exception)

        if msg is not None:
            message = '{0}: {1}'.format(msg, except_msg)
        else:
            message = except_msg

        try:
            response = exception.response
        except AttributeError:
            response = None

        failure = dict(
            msg=message,
            exception=last_traceback,
            **self._gather_versions()
        )

        if response is not None:
            failure.update(**camel_dict_to_snake_dict(response))

        self.fail_json(**failure)

    def _gather_versions(self):
        """Gather AWS SDK (boto3 and botocore) dependency versions

        Returns {'boto3_version': str, 'botocore_version': str}
        Returns {} if neither are installed
        """
        if not HAS_BOTO3:
            return {}
        import boto3
        import botocore
        return dict(boto3_version=boto3.__version__,
                    botocore_version=botocore.__version__)

    def boto3_at_least(self, desired):
        """Check if the available boto3 version is greater than or equal to a desired version.

        Usage:
            if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
                # conditionally fail on old boto3 versions if a specific feature is not supported
                module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
        """
        existing = self._gather_versions()
        return LooseVersion(existing['boto3_version']) >= LooseVersion(desired)

    def botocore_at_least(self, desired):
        """Check if the available botocore version is greater than or equal to a desired version.

        Usage:
            if not module.botocore_at_least('1.2.3'):
                module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
            if not module.botocore_at_least('1.5.3'):
                module.warn('Botocore did not include waiters for Service X before 1.5.3. '
                            'To wait until Service X resources are fully available, update botocore.')
        """
        existing = self._gather_versions()
        return LooseVersion(existing['botocore_version']) >= LooseVersion(desired)
class _RetryingBotoClientWrapper(object):
    # Proxy around a boto3 client whose callable attributes accept an extra
    # aws_retry=False keyword; when True, the call runs under the retry
    # decorator supplied at construction time.

    # Methods that must never be wrapped (pagination/waiter/url helpers)
    __never_wait = (
        'get_paginator', 'can_paginate',
        'get_waiter', 'generate_presigned_url',
    )

    def __init__(self, client, retry):
        # retry is a decorator factory result, e.g. AWSRetry.jittered_backoff(...)
        self.client = client
        self.retry = retry

    def _create_optional_retry_wrapper_function(self, unwrapped):
        # Build a wrapper that dispatches to the retrying or plain version of
        # the client method depending on the aws_retry keyword.
        retrying_wrapper = self.retry(unwrapped)

        @wraps(unwrapped)
        def deciding_wrapper(aws_retry=False, *args, **kwargs):
            if aws_retry:
                return retrying_wrapper(*args, **kwargs)
            else:
                return unwrapped(*args, **kwargs)
        return deciding_wrapper

    def __getattr__(self, name):
        # Called only on attribute miss; the setattr below caches the wrapped
        # method on the instance so subsequent lookups skip __getattr__.
        unwrapped = getattr(self.client, name)
        if name in self.__never_wait:
            return unwrapped
        elif callable(unwrapped):
            wrapped = self._create_optional_retry_wrapper_function(unwrapped)
            setattr(self, name, wrapped)
            return wrapped
        else:
            return unwrapped
def is_boto3_error_code(code, e=None):
    """Check if the botocore exception is raised by a specific error code.

    Returns ClientError when the code matches, otherwise a dummy exception
    class that is never raised — either result can be used directly in an
    ``except`` clause.

    Example:
        try:
            ec2.describe_instances(InstanceIds=['potato'])
        except is_boto3_error_code('InvalidInstanceID.Malformed'):
            # handle the error for that code case
        except botocore.exceptions.ClientError as e:
            # handle the generic error case for all other codes
    """
    from botocore.exceptions import ClientError
    if e is None:
        # Default to the exception currently being handled
        import sys
        e = sys.exc_info()[1]
    matches = isinstance(e, ClientError) and e.response['Error']['Code'] == code
    return ClientError if matches else type('NeverEverRaisedException', (Exception,), {})
def get_boto3_client_method_parameters(client, method_name, required=False):
    """Return the parameter names accepted by a boto3 client method.

    :param client: a boto3 client
    :param method_name: the python-level method name on the client
    :param required: when True, return only the required parameters
    :return: list of parameter names ([] when the operation takes no input)
    """
    op = client.meta.method_to_api_mapping.get(method_name)
    input_shape = client._service_model.operation_model(op).input_shape
    if not input_shape:
        return []
    members = input_shape.required_members if required else input_shape.members.keys()
    return list(members)

@ -1,49 +0,0 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import traceback
try:
from botocore.exceptions import ClientError, NoCredentialsError
except ImportError:
pass # caught by HAS_BOTO3
from ansible.module_utils._text import to_native
def get_aws_account_id(module):
    """ Given AnsibleAWSModule instance, get the active AWS account ID

    get_account_id tries to find out the account that we are working
    on.  It's not guaranteed that this will be easy so we try in
    several different ways.  Giving either IAM or STS privileges to
    the account should be enough to permit this.

    :param module: an AnsibleAWSModule (must provide client()/fail_json*)
    :return: the 12-digit account ID as native text; fails the module otherwise
    """
    import re

    account_id = None
    try:
        sts_client = module.client('sts')
        account_id = sts_client.get_caller_identity().get('Account')
    # non-STS sessions may also get NoCredentialsError from this STS call, so
    # we must catch that too and try the IAM version
    except (ClientError, NoCredentialsError):
        try:
            iam_client = module.client('iam')
            # Account ID is the 5th ':'-separated field of the user ARN
            account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
        except ClientError as e:
            if (e.response['Error']['Code'] == 'AccessDenied'):
                except_msg = to_native(e)
                # Bug fix: the original called except_msg.search(...), but str
                # has no .search — use re.search on the message instead.
                # don't match on `arn:aws` because of China region `arn:aws-cn` and similar
                match = re.search(r"arn:\w+:iam::([0-9]{12,32}):\w+/", except_msg)
                account_id = match.group(1) if match else None
            if account_id is None:
                module.fail_json_aws(e, msg="Could not get AWS account information")
        except Exception as e:
            module.fail_json(
                msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.",
                exception=traceback.format_exc()
            )
    if not account_id:
        module.fail_json(msg="Failed while determining AWS account ID. Try allowing sts:GetCallerIdentity or iam:GetUser permissions.")
    return to_native(account_id)

@ -1,50 +0,0 @@
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # Handled by the calling module
HAS_MD5 = True
try:
from hashlib import md5
except ImportError:
try:
from md5 import md5
except ImportError:
HAS_MD5 = False
def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
    # Compute the local file's S3 ETag in the same form as the remote *etag*:
    # either a plain MD5, or a multipart "hash-of-part-hashes"-COUNT value.
    # Returns None when no MD5 implementation is available.
    if not HAS_MD5:
        return None

    if '-' in etag:
        # Multi-part ETag; a hash of the hashes of each part.
        # Part count is the suffix after '-' inside the quoted etag,
        # e.g. '"abc...-42"' -> 42
        parts = int(etag[1:-1].split('-')[1])
        digests = []

        s3_kwargs = dict(
            Bucket=bucket,
            Key=obj,
        )
        if version:
            s3_kwargs['VersionId'] = version

        with open(filename, 'rb') as f:
            # Read the local file sequentially, using each remote part's
            # ContentLength as the chunk size so part boundaries line up.
            for part_num in range(1, parts + 1):
                s3_kwargs['PartNumber'] = part_num
                try:
                    head = s3.head_object(**s3_kwargs)
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Failed to get head object")
                digests.append(md5(f.read(int(head['ContentLength']))))

        # Final etag: md5 of the concatenated per-part digests, dash, part count
        digest_squared = md5(b''.join(m.digest() for m in digests))
        return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
    else:  # Compute the MD5 sum normally
        return '"{0}"'.format(module.md5(filename))

@ -1,405 +0,0 @@
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
import botocore.waiter as core_waiter
except ImportError:
pass # caught by HAS_BOTO3
# Custom botocore waiter configurations, keyed by service. Each entry follows
# the standard botocore waiter-model schema (version/waiters/acceptors).
ec2_data = {
    "version": 2,
    "waiters": {
        "InternetGatewayExists": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeInternetGateways",
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": True,
                    "argument": "length(InternetGateways) > `0`",
                    "state": "success"
                },
                {
                    "matcher": "error",
                    "expected": "InvalidInternetGatewayID.NotFound",
                    "state": "retry"
                },
            ]
        },
        "RouteTableExists": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeRouteTables",
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": True,
                    "argument": "length(RouteTables[]) > `0`",
                    "state": "success"
                },
                {
                    "matcher": "error",
                    "expected": "InvalidRouteTableID.NotFound",
                    "state": "retry"
                },
            ]
        },
        "SecurityGroupExists": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeSecurityGroups",
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": True,
                    "argument": "length(SecurityGroups[]) > `0`",
                    "state": "success"
                },
                {
                    "matcher": "error",
                    "expected": "InvalidGroup.NotFound",
                    "state": "retry"
                },
            ]
        },
        "SubnetExists": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeSubnets",
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": True,
                    "argument": "length(Subnets[]) > `0`",
                    "state": "success"
                },
                {
                    "matcher": "error",
                    "expected": "InvalidSubnetID.NotFound",
                    "state": "retry"
                },
            ]
        },
        "SubnetHasMapPublic": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeSubnets",
            "acceptors": [
                {
                    "matcher": "pathAll",
                    "expected": True,
                    "argument": "Subnets[].MapPublicIpOnLaunch",
                    "state": "success"
                },
            ]
        },
        "SubnetNoMapPublic": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeSubnets",
            "acceptors": [
                {
                    "matcher": "pathAll",
                    "expected": False,
                    "argument": "Subnets[].MapPublicIpOnLaunch",
                    "state": "success"
                },
            ]
        },
        "SubnetHasAssignIpv6": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeSubnets",
            "acceptors": [
                {
                    "matcher": "pathAll",
                    "expected": True,
                    "argument": "Subnets[].AssignIpv6AddressOnCreation",
                    "state": "success"
                },
            ]
        },
        "SubnetNoAssignIpv6": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeSubnets",
            "acceptors": [
                {
                    "matcher": "pathAll",
                    "expected": False,
                    "argument": "Subnets[].AssignIpv6AddressOnCreation",
                    "state": "success"
                },
            ]
        },
        "SubnetDeleted": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeSubnets",
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": True,
                    "argument": "length(Subnets[]) > `0`",
                    "state": "retry"
                },
                {
                    "matcher": "error",
                    "expected": "InvalidSubnetID.NotFound",
                    "state": "success"
                },
            ]
        },
        "VpnGatewayExists": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeVpnGateways",
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": True,
                    "argument": "length(VpnGateways[]) > `0`",
                    "state": "success"
                },
                {
                    "matcher": "error",
                    "expected": "InvalidVpnGatewayID.NotFound",
                    "state": "retry"
                },
            ]
        },
        "VpnGatewayDetached": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeVpnGateways",
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": True,
                    "argument": "VpnGateways[0].State == 'available'",
                    "state": "success"
                },
            ]
        },
    }
}

# WAF waiter: poll until a change token reports INSYNC
waf_data = {
    "version": 2,
    "waiters": {
        "ChangeTokenInSync": {
            "delay": 20,
            "maxAttempts": 60,
            "operation": "GetChangeTokenStatus",
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": True,
                    "argument": "ChangeTokenStatus == 'INSYNC'",
                    "state": "success"
                },
                {
                    "matcher": "error",
                    "expected": "WAFInternalErrorException",
                    "state": "retry"
                }
            ]
        }
    }
}

# EKS waiters: cluster lifecycle (active / deleted)
eks_data = {
    "version": 2,
    "waiters": {
        "ClusterActive": {
            "delay": 20,
            "maxAttempts": 60,
            "operation": "DescribeCluster",
            "acceptors": [
                {
                    "state": "success",
                    "matcher": "path",
                    "argument": "cluster.status",
                    "expected": "ACTIVE"
                },
                {
                    "state": "retry",
                    "matcher": "error",
                    "expected": "ResourceNotFoundException"
                }
            ]
        },
        "ClusterDeleted": {
            "delay": 20,
            "maxAttempts": 60,
            "operation": "DescribeCluster",
            "acceptors": [
                {
                    "state": "retry",
                    "matcher": "path",
                    "argument": "cluster.status != 'DELETED'",
                    "expected": True
                },
                {
                    "state": "success",
                    "matcher": "error",
                    "expected": "ResourceNotFoundException"
                }
            ]
        }
    }
}

# RDS waiter: instance fully stopped
rds_data = {
    "version": 2,
    "waiters": {
        "DBInstanceStopped": {
            "delay": 20,
            "maxAttempts": 60,
            "operation": "DescribeDBInstances",
            "acceptors": [
                {
                    "state": "success",
                    "matcher": "pathAll",
                    "argument": "DBInstances[].DBInstanceStatus",
                    "expected": "stopped"
                },
            ]
        }
    }
}
def ec2_model(name):
    """Return the named waiter model from the EC2 waiter config."""
    return core_waiter.WaiterModel(waiter_config=ec2_data).get_waiter(name)


def waf_model(name):
    """Return the named waiter model from the WAF waiter config."""
    return core_waiter.WaiterModel(waiter_config=waf_data).get_waiter(name)


def eks_model(name):
    """Return the named waiter model from the EKS waiter config."""
    return core_waiter.WaiterModel(waiter_config=eks_data).get_waiter(name)


def rds_model(name):
    """Return the named waiter model from the RDS waiter config."""
    return core_waiter.WaiterModel(waiter_config=rds_data).get_waiter(name)
# Registry of custom waiters: (client class name, waiter name) -> factory
# taking the client and returning a configured botocore Waiter.
waiters_by_name = {
    ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter(
        'internet_gateway_exists',
        ec2_model('InternetGatewayExists'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_internet_gateways
        )),
    ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
        'route_table_exists',
        ec2_model('RouteTableExists'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_route_tables
        )),
    ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
        'security_group_exists',
        ec2_model('SecurityGroupExists'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_security_groups
        )),
    ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
        'subnet_exists',
        ec2_model('SubnetExists'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
        'subnet_has_map_public',
        ec2_model('SubnetHasMapPublic'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
        'subnet_no_map_public',
        ec2_model('SubnetNoMapPublic'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
        'subnet_has_assign_ipv6',
        ec2_model('SubnetHasAssignIpv6'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
        'subnet_no_assign_ipv6',
        ec2_model('SubnetNoAssignIpv6'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
        'subnet_deleted',
        ec2_model('SubnetDeleted'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter(
        'vpn_gateway_exists',
        ec2_model('VpnGatewayExists'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_vpn_gateways
        )),
    ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter(
        'vpn_gateway_detached',
        ec2_model('VpnGatewayDetached'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_vpn_gateways
        )),
    ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
        'change_token_in_sync',
        waf_model('ChangeTokenInSync'),
        core_waiter.NormalizedOperationMethod(
            waf.get_change_token_status
        )),
    ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
        'change_token_in_sync',
        waf_model('ChangeTokenInSync'),
        core_waiter.NormalizedOperationMethod(
            waf.get_change_token_status
        )),
    ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
        'cluster_active',
        eks_model('ClusterActive'),
        core_waiter.NormalizedOperationMethod(
            eks.describe_cluster
        )),
    ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter(
        'cluster_deleted',
        eks_model('ClusterDeleted'),
        core_waiter.NormalizedOperationMethod(
            eks.describe_cluster
        )),
    ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
        'db_instance_stopped',
        rds_model('DBInstanceStopped'),
        core_waiter.NormalizedOperationMethod(
            rds.describe_db_instances
        )),
}
def get_waiter(client, waiter_name):
    # Look up a custom waiter by (client class name, waiter name) — e.g.
    # ('EC2', 'subnet_deleted') — and instantiate it for the given client.
    try:
        return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
    except KeyError:
        # Unknown combination: surface every registered key so callers can see
        # what is actually available.
        raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
            waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))

@ -1,217 +0,0 @@
#
# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module adds shared support for generic cloud modules
In order to use this module, include it as part of a custom
module as shown below.
from ansible.module_utils.cloud import CloudRetry
The 'cloud' module provides the following common classes:
* CloudRetry
- The base class to be used by other cloud providers, in order to
provide a backoff/retry decorator based on status codes.
- Example using the AWSRetry class which inherits from CloudRetry.
@AWSRetry.exponential_backoff(retries=10, delay=3)
get_ec2_security_group_ids_from_names()
@AWSRetry.jittered_backoff()
get_ec2_security_group_ids_from_names()
"""
import random
from functools import wraps
import syslog
import time
def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
""" Customizable exponential backoff strategy.
Args:
retries (int): Maximum number of times to retry a request.
delay (float): Initial (base) delay.
backoff (float): base of the exponent to use for exponential
backoff.
max_delay (int): Optional. If provided each delay generated is capped
at this amount. Defaults to 60 seconds.
Returns:
Callable that returns a generator. This generator yields durations in
seconds to be used as delays for an exponential backoff strategy.
Usage:
>>> backoff = _exponential_backoff()
>>> backoff
<function backoff_backoff at 0x7f0d939facf8>
>>> list(backoff())
[2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
"""
def backoff_gen():
for retry in range(0, retries):
sleep = delay * backoff ** retry
yield sleep if max_delay is None else min(sleep, max_delay)
return backoff_gen
def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
""" Implements the "Full Jitter" backoff strategy described here
https://www.awsarchitectureblog.com/2015/03/backoff.html
Args:
retries (int): Maximum number of times to retry a request.
delay (float): Approximate number of seconds to sleep for the first
retry.
max_delay (int): The maximum number of seconds to sleep for any retry.
_random (random.Random or None): Makes this generator testable by
allowing developers to explicitly pass in the a seeded Random.
Returns:
Callable that returns a generator. This generator yields durations in
seconds to be used as delays for a full jitter backoff strategy.
Usage:
>>> backoff = _full_jitter_backoff(retries=5)
>>> backoff
<function backoff_backoff at 0x7f0d939facf8>
>>> list(backoff())
[3, 6, 5, 23, 38]
>>> list(backoff())
[2, 1, 6, 6, 31]
"""
def backoff_gen():
for retry in range(0, retries):
yield _random.randint(0, min(max_delay, delay * 2 ** retry))
return backoff_gen
class CloudRetry(object):
    """ CloudRetry can be used by any cloud provider, in order to implement a
    backoff algorithm/retry effect based on Status Code from Exceptions.

    Subclasses must set base_class and implement status_code_from_exception()
    and found(); see AWSRetry for a concrete example.
    """
    # This is the base class of the exception.
    # AWS Example botocore.exceptions.ClientError
    # Subclasses override this; isinstance() against it decides retryability.
    base_class = None
    @staticmethod
    def status_code_from_exception(error):
        """ Return the status code from the exception object
        Args:
            error (object): The exception itself.
        """
        pass
    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        """ Return True if the Response Code to retry on was found.
        Args:
            response_code (str): This is the Response Code that is being matched against.
            catch_extra_error_codes (list or None): Additional caller-supplied
                codes that should also be considered retryable.
        """
        pass
    @classmethod
    def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
        """ Retry calling the Cloud decorated function using the provided
        backoff strategy.
        Args:
            backoff_strategy (callable): Callable that returns a generator. The
            generator should yield sleep times for each retry of the decorated
            function.
        """
        def deco(f):
            @wraps(f)
            def retry_func(*args, **kwargs):
                # One attempt per yielded delay; sleep and retry only when the
                # exception matches base_class AND found() accepts its code.
                for delay in backoff_strategy():
                    try:
                        return f(*args, **kwargs)
                    except Exception as e:
                        if isinstance(e, cls.base_class):
                            response_code = cls.status_code_from_exception(e)
                            if cls.found(response_code, catch_extra_error_codes):
                                msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
                                syslog.syslog(syslog.LOG_INFO, msg)
                                time.sleep(delay)
                            else:
                                # Return original exception if exception is not a ClientError
                                raise e
                        else:
                            # Return original exception if exception is not a ClientError
                            raise e
                # Final attempt after the delays are exhausted; any exception
                # here propagates to the caller uncaught.
                return f(*args, **kwargs)
            return retry_func  # true decorator
        return deco
    @classmethod
    def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
        """
        Retry calling the Cloud decorated function using an exponential backoff.
        Kwargs:
            retries (int): Number of times to retry a failed request before giving up
                default=10
            delay (int or float): Initial delay between retries in seconds
                default=3
            backoff (int or float): backoff multiplier e.g. value of 2 will
                double the delay each retry
                default=2
            max_delay (int or None): maximum amount of time to wait between retries.
                default=60
        """
        return cls._backoff(_exponential_backoff(
            retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
    @classmethod
    def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
        """
        Retry calling the Cloud decorated function using a jittered backoff
        strategy. More on this strategy here:
        https://www.awsarchitectureblog.com/2015/03/backoff.html
        Kwargs:
            retries (int): Number of times to retry a failed request before giving up
                default=10
            delay (int): Initial delay between retries in seconds
                default=3
            max_delay (int): maximum amount of time to wait between retries.
                default=60
        """
        return cls._backoff(_full_jitter_backoff(
            retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
    @classmethod
    def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
        """
        Retry calling the Cloud decorated function using an exponential backoff.
        Compatibility for the original implementation of CloudRetry.backoff that
        did not provide configurable backoff strategies. Developers should use
        CloudRetry.exponential_backoff instead.
        Kwargs:
            tries (int): Number of times to try (not retry) before giving up
                default=10
            delay (int or float): Initial delay between retries in seconds
                default=3
            backoff (int or float): backoff multiplier e.g. value of 2 will
                double the delay each retry
                default=1.1
        """
        # tries counts the final uncaught attempt too, hence retries=tries - 1.
        return cls.exponential_backoff(
            retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)

@ -1,758 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import sys
import traceback
from ansible.module_utils.ansible_release import __version__
from ansible.module_utils.basic import missing_required_lib, env_fallback
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.cloud import CloudRetry
from ansible.module_utils.six import string_types, binary_type, text_type
from ansible.module_utils.common.dict_transformations import (
camel_dict_to_snake_dict, snake_dict_to_camel_dict,
_camel_to_snake, _snake_to_camel,
)
BOTO_IMP_ERR = None
try:
import boto
import boto.ec2 # boto does weird import stuff
HAS_BOTO = True
except ImportError:
BOTO_IMP_ERR = traceback.format_exc()
HAS_BOTO = False
BOTO3_IMP_ERR = None
try:
import boto3
import botocore
HAS_BOTO3 = True
except Exception:
BOTO3_IMP_ERR = traceback.format_exc()
HAS_BOTO3 = False
try:
# Although this is to allow Python 3 the ability to use the custom comparison as a key, Python 2.7 also
# uses this (and it works as expected). Python 2.6 will trigger the ImportError.
from functools import cmp_to_key
PY3_COMPARISON = True
except ImportError:
PY3_COMPARISON = False
class AnsibleAWSError(Exception):
    """Generic exception raised by the AWS helpers in this module (e.g. connect_to_aws)."""
    pass
def _botocore_exception_maybe():
    """
    Allow for boto3 not being installed when using these utils by wrapping
    botocore.exceptions instead of assigning from it directly.
    """
    # Without botocore, hand back a type no real exception is an instance of,
    # so isinstance checks against it are simply always False.
    if not HAS_BOTO3:
        return type(None)
    return botocore.exceptions.ClientError
class AWSRetry(CloudRetry):
    """CloudRetry specialization that retries on transient botocore ClientErrors."""
    # NoneType when boto3/botocore is absent, which disables matching entirely.
    base_class = _botocore_exception_maybe()
    @staticmethod
    def status_code_from_exception(error):
        # botocore keeps the service error code at response['Error']['Code'].
        return error.response['Error']['Code']
    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        """Return True when response_code is a throttling/transient code worth retrying."""
        # This list of failures is based on this API Reference
        # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
        #
        # TooManyRequestsException comes from inside botocore when it
        # does retrys, unfortunately however it does not try long
        # enough to allow some services such as API Gateway to
        # complete configuration. At the moment of writing there is a
        # botocore/boto3 bug open to fix this.
        #
        # https://github.com/boto/boto3/issues/876 (and linked PRs etc)
        retry_on = [
            'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
            'InternalFailure', 'InternalError', 'TooManyRequestsException',
            'Throttling'
        ]
        if catch_extra_error_codes:
            retry_on.extend(catch_extra_error_codes)
        return response_code in retry_on
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
    # Thin wrapper around _boto3_conn that converts connection/credential
    # errors into module.fail_json calls instead of letting them propagate.
    try:
        return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
    except ValueError as e:
        module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
    except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
            botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
        module.fail_json(msg=to_native(e))
    except botocore.exceptions.NoRegionError as e:
        module.fail_json(msg="The %s module requires a region and none was found in configuration, "
                             "environment variables or module parameters" % module._name)
def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
    # Build a boto3 client and/or resource for the named service ('resource'
    # is the service name here). conn_type selects 'client', 'resource' or
    # 'both'; remaining params are forwarded to boto3.
    profile = params.pop('profile_name', None)
    if conn_type not in ['both', 'resource', 'client']:
        raise ValueError('There is an issue in the calling code. You '
                         'must specify either both, resource, or client to '
                         'the conn_type parameter in the boto3_conn function '
                         'call')
    # Tag every request with the Ansible version for server-side diagnostics.
    config = botocore.config.Config(
        user_agent_extra='Ansible/{0}'.format(__version__),
    )
    # Both 'config' and 'aws_config' are honored; on conflicting options the
    # later merge ('aws_config') wins.
    if params.get('config') is not None:
        config = config.merge(params.pop('config'))
    if params.get('aws_config') is not None:
        config = config.merge(params.pop('aws_config'))
    session = boto3.session.Session(
        profile_name=profile,
    )
    if conn_type == 'resource':
        return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
    elif conn_type == 'client':
        return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
    else:
        # 'both': return a (client, resource) pair.
        # NOTE(review): the merged Config is not passed in this branch —
        # presumably an upstream oversight; confirm before relying on it.
        client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
        resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
        return client, resource
# Public alias for the private connection helper; kept for external callers
# (presumably inventory code, per the name) — verify callers before removing.
boto3_inventory_conn = _boto3_conn
def boto_exception(err):
    """Extract a human-readable message from a boto exception.

    :param err: Exception raised by boto
    :return: Error message string
    """
    # boto2 exceptions expose error_message; some expose message; anything
    # else falls back to a generic "<class>: <err>" rendering.
    if hasattr(err, 'error_message'):
        return err.error_message
    if hasattr(err, 'message'):
        return str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
    return '%s: %s' % (Exception, err)
def aws_common_argument_spec():
    """Return the argument spec shared by all AWS modules: credentials,
    endpoint URL, profile, TLS validation and botocore config overrides."""
    return dict(
        debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'),
        ec2_url=dict(),
        aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
        aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
        validate_certs=dict(default=True, type='bool'),
        security_token=dict(aliases=['access_token'], no_log=True),
        profile=dict(),
        aws_config=dict(type='dict'),
    )
def ec2_argument_spec():
    """Return the common AWS argument spec extended with the 'region' option."""
    argument_spec = aws_common_argument_spec()
    argument_spec.update(region=dict(aliases=['aws_region', 'ec2_region']))
    return argument_spec
def get_aws_region(module, boto3=False):
    # Resolve the AWS region, in priority order: module param, AWS_REGION,
    # AWS_DEFAULT_REGION, EC2_REGION, then the boto config file (boto2 path)
    # or the botocore session config (boto3 path).
    region = module.params.get('region')
    if region:
        return region
    if 'AWS_REGION' in os.environ:
        return os.environ['AWS_REGION']
    if 'AWS_DEFAULT_REGION' in os.environ:
        return os.environ['AWS_DEFAULT_REGION']
    if 'EC2_REGION' in os.environ:
        return os.environ['EC2_REGION']
    if not boto3:
        if not HAS_BOTO:
            module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR)
        # boto.config.get returns None if config not found
        region = boto.config.get('Boto', 'aws_region')
        if region:
            return region
        return boto.config.get('Boto', 'ec2_region')
    if not HAS_BOTO3:
        module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
    # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
    try:
        profile_name = module.params.get('profile')
        return botocore.session.Session(profile=profile_name).get_config_variable('region')
    except botocore.exceptions.ProfileNotFound as e:
        # An unknown profile yields no region rather than an error here.
        return None
def get_aws_connection_info(module, boto3=False):
    # Resolve connection settings with priority: module params, then
    # environment variables, then (when boto2 is available) the boto config
    # file. Returns (region, ec2_url, boto_params) where boto_params is keyed
    # for boto3 or boto2 depending on the 'boto3' flag.
    # Check module args for credentials, then check environment vars
    # access_key
    ec2_url = module.params.get('ec2_url')
    access_key = module.params.get('aws_access_key')
    secret_key = module.params.get('aws_secret_key')
    security_token = module.params.get('security_token')
    region = get_aws_region(module, boto3)
    profile_name = module.params.get('profile')
    validate_certs = module.params.get('validate_certs')
    config = module.params.get('aws_config')
    if not ec2_url:
        if 'AWS_URL' in os.environ:
            ec2_url = os.environ['AWS_URL']
        elif 'EC2_URL' in os.environ:
            ec2_url = os.environ['EC2_URL']
    if not access_key:
        if os.environ.get('AWS_ACCESS_KEY_ID'):
            access_key = os.environ['AWS_ACCESS_KEY_ID']
        elif os.environ.get('AWS_ACCESS_KEY'):
            access_key = os.environ['AWS_ACCESS_KEY']
        elif os.environ.get('EC2_ACCESS_KEY'):
            access_key = os.environ['EC2_ACCESS_KEY']
        elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
            access_key = boto.config.get('Credentials', 'aws_access_key_id')
        elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
            access_key = boto.config.get('default', 'aws_access_key_id')
        else:
            # in case access_key came in as empty string
            access_key = None
    if not secret_key:
        if os.environ.get('AWS_SECRET_ACCESS_KEY'):
            secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
        elif os.environ.get('AWS_SECRET_KEY'):
            secret_key = os.environ['AWS_SECRET_KEY']
        elif os.environ.get('EC2_SECRET_KEY'):
            secret_key = os.environ['EC2_SECRET_KEY']
        elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
            secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
        elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
            secret_key = boto.config.get('default', 'aws_secret_access_key')
        else:
            # in case secret_key came in as empty string
            secret_key = None
    if not security_token:
        if os.environ.get('AWS_SECURITY_TOKEN'):
            security_token = os.environ['AWS_SECURITY_TOKEN']
        elif os.environ.get('AWS_SESSION_TOKEN'):
            security_token = os.environ['AWS_SESSION_TOKEN']
        elif os.environ.get('EC2_SECURITY_TOKEN'):
            security_token = os.environ['EC2_SECURITY_TOKEN']
        elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
            security_token = boto.config.get('Credentials', 'aws_security_token')
        elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
            security_token = boto.config.get('default', 'aws_security_token')
        else:
            # in case secret_token came in as empty string
            security_token = None
    if HAS_BOTO3 and boto3:
        boto_params = dict(aws_access_key_id=access_key,
                           aws_secret_access_key=secret_key,
                           aws_session_token=security_token)
        boto_params['verify'] = validate_certs
        if profile_name:
            # A named profile supersedes explicit credentials entirely.
            boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
            boto_params['profile_name'] = profile_name
    else:
        boto_params = dict(aws_access_key_id=access_key,
                           aws_secret_access_key=secret_key,
                           security_token=security_token)
        # only set profile_name if passed as an argument
        if profile_name:
            boto_params['profile_name'] = profile_name
        boto_params['validate_certs'] = validate_certs
    if config is not None:
        if HAS_BOTO3 and boto3:
            boto_params['aws_config'] = botocore.config.Config(**config)
        elif HAS_BOTO and not boto3:
            if 'user_agent' in config:
                sys.modules["boto.connection"].UserAgent = config['user_agent']
    # Normalize any bytes values to text so they are safe to pass to boto.
    for param, value in boto_params.items():
        if isinstance(value, binary_type):
            boto_params[param] = text_type(value, 'utf-8', 'strict')
    return region, ec2_url, boto_params
def get_ec2_creds(module):
    ''' for compatibility mode with old modules that don't/can't yet
    use ec2_connect method '''
    # Returns (url, access_key, secret_key, region); the boto3=False default of
    # get_aws_connection_info guarantees the boto2-style keys accessed below.
    region, ec2_url, boto_params = get_aws_connection_info(module)
    return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
def boto_fix_security_token_in_profile(conn, profile_name):
    ''' monkey patch for boto issue boto/boto#2100 '''
    # boto stores named profiles under "profile <name>" sections in its config;
    # copy the security token from there onto the live connection's provider.
    profile = 'profile ' + profile_name
    if boto.config.has_option(profile, 'aws_security_token'):
        conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
    return conn
def connect_to_aws(aws_module, region, **params):
    # Connect a boto2 service module (e.g. boto.ec2) to a region, raising
    # AnsibleAWSError with clearer messages than boto's own failures.
    try:
        conn = aws_module.connect_to_region(region, **params)
    except(boto.provider.ProfileNotFoundError):
        raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
    if not conn:
        # connect_to_region returns None when the region is unknown to boto.
        if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
            raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
                                  "boto or extend with endpoints_path" % (region, aws_module.__name__))
        else:
            raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
    if params.get('profile_name'):
        conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
    return conn
def ec2_connect(module):
    """ Return an ec2 connection"""
    region, ec2_url, boto_params = get_aws_connection_info(module)
    # If we have a region specified, connect to its endpoint.
    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **boto_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
            module.fail_json(msg=str(e))
    # Otherwise, no region so we fallback to the old connection method
    elif ec2_url:
        try:
            ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
            module.fail_json(msg=str(e))
    else:
        # Nothing to connect with: fail the module rather than guessing.
        module.fail_json(msg="Either region or ec2_url must be specified")
    return ec2
def ansible_dict_to_boto3_filter_list(filters_dict):
    """Convert an Ansible dict of filters into boto3's list-of-dicts form.

    Args:
        filters_dict (dict): Dict of AWS filters, e.g. {'some-aws-id': 'i-01234567'}.
    Returns:
        list: [{'Name': 'some-aws-id', 'Values': ['i-01234567']}, ...].
        A scalar string value is wrapped in a one-element list; any other
        value is passed through unchanged as the Values entry.
    """
    filter_list = []
    for filter_name, filter_value in filters_dict.items():
        values = [filter_value] if isinstance(filter_value, string_types) else filter_value
        filter_list.append({'Name': filter_name, 'Values': values})
    return filter_list
def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
    """Convert a boto3 list of resource tags to a flat {key: value} dict.

    Args:
        tags_list (list): List of dicts representing AWS tags,
            e.g. [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}].
        tag_name_key_name (str): Override for the dict key that holds the tag
            name (boto3 doesn't always use "Key").
        tag_value_key_name (str): Override for the dict key that holds the tag
            value (boto3 doesn't always use "Value").
    Returns:
        dict: e.g. {'MyTagKey': 'MyTagValue'}; {} when tags_list is empty.
    Raises:
        ValueError: when no candidate key/value naming scheme matches the
            first element of tags_list.
    """
    if tag_name_key_name and tag_value_key_name:
        tag_candidates = {tag_name_key_name: tag_value_key_name}
    else:
        # The two naming schemes seen in the wild.
        tag_candidates = {'key': 'value', 'Key': 'Value'}
    if not tags_list:
        return {}
    first = tags_list[0]
    for name_key, value_key in tag_candidates.items():
        if name_key in first and value_key in first:
            return dict((entry[name_key], entry[value_key]) for entry in tags_list)
    raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
    """Convert a flat {key: value} dict of AWS tags into boto3's list form.

    Args:
        tags_dict (dict): Dict representing AWS resource tags.
        tag_name_key_name (str): Dict key to hold each tag name (default 'Key').
        tag_value_key_name (str): Dict key to hold each tag value (default 'Value').
    Returns:
        list: e.g. [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]; values are
        coerced to native strings via to_native.
    """
    return [
        {tag_name_key_name: tag_key, tag_value_key_name: to_native(tag_value)}
        for tag_key, tag_value in tags_dict.items()
    ]
def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
    """ Return list of security group IDs from security group names. Note that security group names are not unique
    across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
    will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
    a try block

    Raises ValueError for any entry that is neither a known group name nor
    shaped like a security group ID (sg-xxxxxxxx).
    """
    def get_sg_name(sg, boto3):
        # boto3 returns plain dicts; boto2 returns SecurityGroup objects.
        if boto3:
            return sg['GroupName']
        else:
            return sg.name

    def get_sg_id(sg, boto3):
        if boto3:
            return sg['GroupId']
        else:
            return sg.id

    sec_group_id_list = []
    if isinstance(sec_group_list, string_types):
        sec_group_list = [sec_group_list]
    # Get all security groups, optionally scoped to one VPC.
    if boto3:
        if vpc_id:
            filters = [
                {
                    'Name': 'vpc-id',
                    'Values': [
                        vpc_id,
                    ]
                }
            ]
            all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
        else:
            all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
    else:
        if vpc_id:
            filters = {'vpc-id': vpc_id}
            all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
        else:
            all_sec_groups = ec2_connection.get_all_security_groups()
    unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
    sec_group_name_list = list(set(sec_group_list) - set(unmatched))
    if len(unmatched) > 0:
        # If we have unmatched names that look like an ID, assume they are.
        # ('re' is imported at module level; the redundant local import that
        # used to shadow it here has been removed.)
        sec_group_id_list = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
        still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
        if len(still_unmatched) > 0:
            raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
    sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]
    return sec_group_id_list
def _hashable_policy(policy, policy_list):
    """
    Takes a policy and returns a list, the contents of which are all hashable and sorted.
    Example input policy:
        {'Version': '2012-10-17',
         'Statement': [{'Action': 's3:PutObjectAcl',
                        'Sid': 'AddCannedAcl2',
                        'Resource': 'arn:aws:s3:::test_policy/*',
                        'Effect': 'Allow',
                        'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1',
                                              'arn:aws:iam::XXXXXXXXXXXX:user/username2']}}]}
    Returned value:
        [('Statement', ((('Action', (u's3:PutObjectAcl',)),
                         ('Effect', (u'Allow',)),
                         ('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',),
                                                (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
                         ('Resource', (u'arn:aws:s3:::test_policy/*',)),
                         ('Sid', (u'AddCannedAcl2',)))),
         ('Version', (u'2012-10-17',)))]

    policy_list is the accumulator for the current recursion level; callers
    pass [] and receive the canonicalized structure back.
    """
    # Amazon will automatically convert bool and int to strings for us
    if isinstance(policy, bool):
        return tuple([str(policy).lower()])
    elif isinstance(policy, int):
        return tuple([str(policy)])
    if isinstance(policy, list):
        for each in policy:
            tupleified = _hashable_policy(each, [])
            if isinstance(tupleified, list):
                tupleified = tuple(tupleified)
            policy_list.append(tupleified)
    elif isinstance(policy, string_types) or isinstance(policy, binary_type):
        policy = to_text(policy)
        # convert root account ARNs to just account IDs
        if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
            policy = policy.split(':')[4]
        return [policy]
    elif isinstance(policy, dict):
        # Canonical order: process keys sorted so equal policies hash equal.
        sorted_keys = list(policy.keys())
        sorted_keys.sort()
        for key in sorted_keys:
            tupleified = _hashable_policy(policy[key], [])
            if isinstance(tupleified, list):
                tupleified = tuple(tupleified)
            policy_list.append((key, tupleified))
    # ensure we aren't returning deeply nested structures of length 1
    if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
        policy_list = policy_list[0]
    if isinstance(policy_list, list):
        if PY3_COMPARISON:
            # Python 3 can't order mixed str/tuple; py3cmp restores py2 order.
            policy_list.sort(key=cmp_to_key(py3cmp))
        else:
            policy_list.sort()
    return policy_list
def py3cmp(a, b):
    """cmp()-style three-way comparison usable on Python 3.

    Python 2 could order mixed types (strings sorted before tuples); Python 3
    raises TypeError instead. Mimic the Python 2 ordering for the str/tuple
    mix that _hashable_policy produces, and re-raise anything else.
    """
    try:
        if a > b:
            return 1
        if a < b:
            return -1
        return 0
    except TypeError as exc:
        # Detect the str-vs-tuple mismatch from the error text; strings are
        # always considered less than tuples (Python 2 behaviour).
        message = to_text(exc)
        str_pos = message.find('str')
        tup_pos = message.find('tuple')
        if str_pos != -1 and tup_pos != -1:
            if str_pos < tup_pos:
                return -1
            if tup_pos < str_pos:
                return 1
        raise
def compare_policies(current_policy, new_policy):
    """ Compares the existing policy and the updated policy
    Returns True if there is a difference between policies.
    """
    # Both policies are canonicalized into hashable, sorted structures first,
    # so ordering and bool/int-vs-string differences don't register as changes.
    return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
def sort_json_policy_dict(policy_dict):
    """Recursively sort every list in an IAM JSON policy.

    Two policies with identical values but differently ordered lists then
    compare equal. Note that sorting is by the elements' natural (for strings,
    lexicographic) order; dict elements sort by their sorted (key, value)
    pairs, since Python 3 cannot compare dicts with ``<``.

    Args:
        policy_dict (dict): Dict representing an IAM JSON policy.
    Returns:
        dict: A copy of the policy with every nested list sorted.
    """
    def _normalize_list(items):
        normalized = []
        for entry in items:
            if isinstance(entry, dict):
                normalized.append(sort_json_policy_dict(entry))
            elif isinstance(entry, list):
                normalized.append(_normalize_list(entry))
            else:
                normalized.append(entry)
        normalized.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
        return normalized

    ordered = {}
    for key, value in policy_dict.items():
        if isinstance(value, dict):
            ordered[key] = sort_json_policy_dict(value)
        elif isinstance(value, list):
            ordered[key] = _normalize_list(value)
        else:
            ordered[key] = value
    return ordered
def map_complex_type(complex_type, type_map):
    """
    Cast elements within a dict/list structure to specific builtin types.

    Example of usage:
        DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
            'maximum_percent': 'int',
            'minimum_healthy_percent': 'int'
        }
        deployment_configuration = map_complex_type(module.params['deployment_configuration'],
                                                    DEPLOYMENT_CONFIGURATION_TYPE_MAP)
    This ensures all keys within the root element are casted and valid integers.

    Returns None for None input. Historical quirk (preserved): if a dict key
    is missing from type_map, the original complex_type is returned unchanged.
    """
    if complex_type is None:
        return
    new_type = type(complex_type)()
    if isinstance(complex_type, dict):
        for key in complex_type:
            if key in type_map:
                if isinstance(type_map[key], list):
                    # A one-element list means "apply this sub-map to the value".
                    new_type[key] = map_complex_type(
                        complex_type[key],
                        type_map[key][0])
                else:
                    new_type[key] = map_complex_type(
                        complex_type[key],
                        type_map[key])
            else:
                # Preserve historical behavior: an unmapped key aborts the
                # cast and returns the input unchanged.
                return complex_type
    elif isinstance(complex_type, list):
        for i in range(len(complex_type)):
            new_type.append(map_complex_type(
                complex_type[i],
                type_map))
    elif type_map:
        # Fix: globals()['__builtins__'] is a dict when this file is imported
        # as a module, but a module object in some execution contexts — the
        # old direct subscript failed in the latter case.
        builtins_ns = globals()['__builtins__']
        if not isinstance(builtins_ns, dict):
            builtins_ns = vars(builtins_ns)
        return builtins_ns[type_map](complex_type)
    return new_type
def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
    """
    Compare two dicts of AWS tags (as produced by boto3_tag_list_to_ansible_dict).

    :param current_tags_dict: tags currently on the resource
    :param new_tags_dict: desired tags
    :param purge_tags: when True, tags present only in current_tags_dict are
        scheduled for removal
    :return: tag_key_value_pairs_to_set: dict of key/value pairs that need to
        be set in AWS; empty when all tags already match
    :return: tag_keys_to_unset: list of key names (str) that need to be unset
        in AWS; empty when nothing needs removing
    """
    tag_keys_to_unset = []
    if purge_tags:
        tag_keys_to_unset = [key for key in current_tags_dict if key not in new_tags_dict]
    tag_key_value_pairs_to_set = {}
    for key, value in new_tags_dict.items():
        # Values are compared as text, mirroring how AWS stores them.
        if to_text(value) != current_tags_dict.get(key):
            tag_key_value_pairs_to_set[key] = value
    return tag_key_value_pairs_to_set, tag_keys_to_unset

File diff suppressed because it is too large Load Diff

@ -1,279 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_ami_info
version_added: '2.5'
short_description: Gather information about ec2 AMIs
description:
- Gather information about ec2 AMIs
- This module was called C(ec2_ami_facts) before Ansible 2.9. The usage did not change.
author:
- Prasad Katti (@prasadkatti)
requirements: [ boto3 ]
options:
image_ids:
description: One or more image IDs.
aliases: [image_id]
type: list
elements: str
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters.
- Filter names and values are case sensitive.
type: dict
owners:
description:
- Filter the images by the owner. Valid options are an AWS account ID, self,
or an AWS owner alias ( amazon | aws-marketplace | microsoft ).
aliases: [owner]
type: list
elements: str
executable_users:
description:
- Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs).
aliases: [executable_user]
type: list
elements: str
describe_image_attributes:
description:
- Describe attributes (like launchPermission) of the images found.
default: no
type: bool
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: gather information about an AMI using ami-id
ec2_ami_info:
image_ids: ami-5b488823
- name: gather information about all AMIs with tag key Name and value webapp
ec2_ami_info:
filters:
"tag:Name": webapp
- name: gather information about an AMI with 'AMI Name' equal to foobar
ec2_ami_info:
filters:
name: foobar
- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477)
ec2_ami_info:
owners: 099720109477
filters:
name: "ubuntu/images/ubuntu-zesty-17.04-*"
'''
RETURN = '''
images:
description: A list of images.
returned: always
type: list
elements: dict
contains:
architecture:
description: The architecture of the image.
returned: always
type: str
sample: x86_64
block_device_mappings:
description: Any block device mapping entries.
returned: always
type: list
elements: dict
contains:
device_name:
description: The device name exposed to the instance.
returned: always
type: str
sample: /dev/sda1
ebs:
description: EBS volumes
returned: always
type: complex
creation_date:
description: The date and time the image was created.
returned: always
type: str
sample: '2017-10-16T19:22:13.000Z'
description:
description: The description of the AMI.
returned: always
type: str
sample: ''
ena_support:
description: Whether enhanced networking with ENA is enabled.
returned: always
type: bool
sample: true
hypervisor:
description: The hypervisor type of the image.
returned: always
type: str
sample: xen
image_id:
description: The ID of the AMI.
returned: always
type: str
sample: ami-5b466623
image_location:
description: The location of the AMI.
returned: always
type: str
sample: 408466080000/Webapp
image_type:
description: The type of image.
returned: always
type: str
sample: machine
launch_permissions:
description: A List of AWS accounts may launch the AMI.
returned: When image is owned by calling account and I(describe_image_attributes) is yes.
type: list
elements: dict
contains:
group:
description: A value of 'all' means the AMI is public.
type: str
user_id:
description: An AWS account ID with permissions to launch the AMI.
type: str
sample: [{"group": "all"}, {"user_id": "408466080000"}]
name:
description: The name of the AMI that was provided during image creation.
returned: always
type: str
sample: Webapp
owner_id:
description: The AWS account ID of the image owner.
returned: always
type: str
sample: '408466080000'
public:
description: Whether the image has public launch permissions.
returned: always
type: bool
sample: true
root_device_name:
description: The device name of the root device.
returned: always
type: str
sample: /dev/sda1
root_device_type:
description: The type of root device used by the AMI.
returned: always
type: str
sample: ebs
sriov_net_support:
description: Whether enhanced networking is enabled.
returned: always
type: str
sample: simple
state:
description: The current state of the AMI.
returned: always
type: str
sample: available
tags:
description: Any tags assigned to the image.
returned: always
type: dict
virtualization_type:
description: The type of virtualization of the AMI.
returned: always
type: str
sample: hvm
'''
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
def list_ec2_images(ec2_client, module):
    """Describe AMIs matching the module parameters and exit the module with the results.

    Builds a DescribeImages call from the ``image_ids``, ``owners``,
    ``executable_users`` and ``filters`` module parameters, optionally
    attaches per-image launch permissions, sorts by creation date, and
    calls ``module.exit_json``.
    """
    image_ids = module.params.get("image_ids")
    owners = module.params.get("owners")
    executable_users = module.params.get("executable_users")
    filters = module.params.get("filters")

    # describe_images is *very* slow if you pass the `Owners`
    # param (unless it's self), for some reason.
    # Converting the owners to filters and removing from the
    # owners param greatly speeds things up.
    # Implementation based on aioue's suggestion in #24886
    owner_param = []
    for owner in owners:
        if owner.isdigit():
            # A plain AWS account id maps to the owner-id filter.
            filters.setdefault('owner-id', list()).append(owner)
        elif owner == 'self':
            # self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
            owner_param.append(owner)
        else:
            # Anything else is treated as an owner alias (amazon | aws-marketplace | microsoft).
            filters.setdefault('owner-alias', list()).append(owner)

    boto3_filters = ansible_dict_to_boto3_filter_list(filters)

    try:
        response = ec2_client.describe_images(ImageIds=image_ids, Filters=boto3_filters, Owners=owner_param, ExecutableUsers=executable_users)
        images = [camel_dict_to_snake_dict(image) for image in response["Images"]]
    except (ClientError, BotoCoreError) as err:
        module.fail_json_aws(err, msg="error describing images")

    for image in images:
        try:
            image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', []))
            if module.params.get("describe_image_attributes"):
                launch_permissions = ec2_client.describe_image_attribute(Attribute='launchPermission', ImageId=image['image_id'])['LaunchPermissions']
                image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions]
        except (ClientError, BotoCoreError):
            # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures
            pass

    # it may be possible that creation_date does not always exist, so sort
    # with an empty-string fallback
    images.sort(key=lambda e: e.get('creation_date', ''))

    module.exit_json(images=images)
def main():
    """Module entry point: parse arguments and gather EC2 AMI information."""
    argument_spec = dict(
        image_ids=dict(default=[], type='list', aliases=['image_id']),
        filters=dict(default={}, type='dict'),
        owners=dict(default=[], type='list', aliases=['owner']),
        executable_users=dict(default=[], type='list', aliases=['executable_user']),
        describe_image_attributes=dict(default=False, type='bool'),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
    # list_ec2_images exits the module itself via exit_json/fail_json.
    list_ec2_images(module.client('ec2'), module)
if __name__ == '__main__':
main()

File diff suppressed because it is too large Load Diff

@ -1,524 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
- Create, modify, and terminate AWS virtual private clouds.
version_added: "2.0"
author:
- Jonathan Davila (@defionscode)
- Sloane Hertel (@s-hertel)
options:
name:
description:
- The name to give your VPC. This is used in combination with C(cidr_block) to determine if a VPC already exists.
required: yes
type: str
cidr_block:
description:
- The primary CIDR of the VPC. After 2.5 a list of CIDRs can be provided. The first in the list will be used as the primary CIDR
and is used in conjunction with the C(name) to ensure idempotence.
required: yes
type: list
elements: str
ipv6_cidr:
description:
- Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses,
or the size of the CIDR block.
default: False
type: bool
version_added: '2.10'
purge_cidrs:
description:
- Remove CIDRs that are associated with the VPC and are not specified in C(cidr_block).
default: no
type: bool
version_added: '2.5'
tenancy:
description:
- Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
default: default
choices: [ 'default', 'dedicated' ]
type: str
dns_support:
description:
- Whether to enable AWS DNS support.
default: yes
type: bool
dns_hostnames:
description:
- Whether to enable AWS hostname support.
default: yes
type: bool
dhcp_opts_id:
description:
- The id of the DHCP options to use for this VPC.
type: str
tags:
description:
- The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of
the VPC if it's different.
aliases: [ 'resource_tags' ]
type: dict
state:
description:
- The state of the VPC. Either absent or present.
default: present
choices: [ 'present', 'absent' ]
type: str
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want
duplicate VPCs created.
type: bool
default: false
requirements:
- boto3
- botocore
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: create a VPC with dedicated tenancy and a couple of tags
ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
region: us-east-1
tags:
module: ec2_vpc_net
this: works
tenancy: dedicated
- name: create a VPC with dedicated tenancy and request an IPv6 CIDR
ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
ipv6_cidr: True
region: us-east-1
tenancy: dedicated
'''
RETURN = '''
vpc:
description: info about the VPC that was created or deleted
returned: always
type: complex
contains:
cidr_block:
description: The CIDR of the VPC
returned: always
type: str
sample: 10.0.0.0/16
cidr_block_association_set:
description: IPv4 CIDR blocks associated with the VPC
returned: success
type: list
sample:
"cidr_block_association_set": [
{
"association_id": "vpc-cidr-assoc-97aeeefd",
"cidr_block": "20.0.0.0/24",
"cidr_block_state": {
"state": "associated"
}
}
]
classic_link_enabled:
description: indicates whether ClassicLink is enabled
returned: always
type: bool
sample: false
dhcp_options_id:
description: the id of the DHCP options associated with this VPC
returned: always
type: str
sample: dopt-0fb8bd6b
id:
description: VPC resource id
returned: always
type: str
sample: vpc-c2e00da5
instance_tenancy:
description: indicates whether VPC uses default or dedicated tenancy
returned: always
type: str
sample: default
ipv6_cidr_block_association_set:
description: IPv6 CIDR blocks associated with the VPC
returned: success
type: list
sample:
"ipv6_cidr_block_association_set": [
{
"association_id": "vpc-cidr-assoc-97aeeefd",
"ipv6_cidr_block": "2001:db8::/56",
"ipv6_cidr_block_state": {
"state": "associated"
}
}
]
is_default:
description: indicates whether this is the default VPC
returned: always
type: bool
sample: false
state:
description: state of the VPC
returned: always
type: str
sample: available
tags:
description: tags attached to the VPC, includes name
returned: always
type: complex
contains:
Name:
description: name tag for the VPC
returned: always
type: str
sample: pk_vpc4
'''
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from time import sleep, time
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict, compare_aws_tags,
ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict)
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
from ansible.module_utils.network.common.utils import to_subnet
def vpc_exists(module, vpc, name, cidr_block, multi):
    """Returns None or a vpc object depending on the existence of a VPC. When supplied
    with a CIDR, it will check for matching tags to determine if it is a match
    otherwise it will assume the VPC does not exist and thus return None.
    """
    name_filter = {'Name': 'tag:Name', 'Values': [name]}
    try:
        matching_vpcs = vpc.describe_vpcs(Filters=[name_filter, {'Name': 'cidr-block', 'Values': cidr_block}])['Vpcs']
        # If an exact matching using a list of CIDRs isn't found, check for a match with the first CIDR as is documented for C(cidr_block)
        if not matching_vpcs:
            matching_vpcs = vpc.describe_vpcs(Filters=[name_filter, {'Name': 'cidr-block', 'Values': [cidr_block[0]]}])['Vpcs']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe VPCs")

    if multi:
        # Caller explicitly allows duplicate VPCs; report "not found" so one is created.
        return None
    if len(matching_vpcs) == 1:
        return matching_vpcs[0]['VpcId']
    if len(matching_vpcs) > 1:
        module.fail_json(msg='Currently there are %d VPCs that have the same name and '
                             'CIDR block you specified. If you would like to create '
                             'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
    return None
@AWSRetry.backoff(delay=3, tries=8, catch_extra_error_codes=['InvalidVpcID.NotFound'])
def get_classic_link_with_backoff(connection, vpc_id):
    """Return the VPC's ClassicLinkEnabled flag; False where the region lacks ClassicLink."""
    try:
        return connection.describe_vpc_classic_link(VpcIds=[vpc_id])['Vpcs'][0].get('ClassicLinkEnabled')
    except botocore.exceptions.ClientError as e:
        # Some regions do not offer ClassicLink at all; treat that as "disabled"
        # rather than an error. Any other client error propagates.
        if e.response["Error"]["Message"] != "The functionality you requested is not available in this region.":
            raise
        return False
def get_vpc(module, connection, vpc_id):
    """Wait until the VPC is available, then return its description dict.

    The returned dict is the raw boto3 Vpc structure augmented with a
    'ClassicLinkEnabled' key. Any AWS error fails the module.
    """
    # wait for vpc to be available
    try:
        connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id))
    try:
        vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0]
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe VPCs")
    try:
        # ClassicLink state is not part of describe_vpcs; fetch it separately.
        vpc_obj['ClassicLinkEnabled'] = get_classic_link_with_backoff(connection, vpc_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe VPCs")
    return vpc_obj
def update_vpc_tags(connection, module, vpc_id, tags, name):
    """Ensure the VPC carries the requested tags plus a Name tag.

    Only adds/updates tags (never purges). Returns True when a change was
    made or would be made in check mode, False when tags already match.
    """
    if tags is None:
        tags = dict()
    tags.update({'Name': name})
    # Normalise all tag values to native strings before comparing with AWS.
    tags = dict((k, to_native(v)) for k, v in tags.items())
    try:
        current_tags = dict((t['Key'], t['Value']) for t in connection.describe_tags(Filters=[{'Name': 'resource-id', 'Values': [vpc_id]}])['Tags'])
        tags_to_update, dummy = compare_aws_tags(current_tags, tags, False)
        if not tags_to_update:
            return False
        if module.check_mode:
            return True
        boto3_tags = ansible_dict_to_boto3_tag_list(tags_to_update)
        connection.create_tags(Resources=[vpc_id], Tags=boto3_tags, aws_retry=True)
        # Wait for tags to be updated
        expected_tags = boto3_tag_list_to_ansible_dict(boto3_tags)
        waiter_filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]} for key, value in expected_tags.items()]
        connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=waiter_filters)
        return True
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to update tags")
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
    """Associate a different DHCP options set with the VPC when needed.

    Returns True if an update was (or, in check mode, would be) made,
    False when the VPC already uses the requested options set.
    """
    if vpc_obj['DhcpOptionsId'] == dhcp_id:
        return False
    if not module.check_mode:
        try:
            connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id))
        try:
            # Wait for DhcpOptionsId to be updated
            waiter_filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}]
            connection.get_waiter('vpc_available').wait(VpcIds=[vpc_obj['VpcId']], Filters=waiter_filters)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json(msg="Failed to wait for DhcpOptionsId to be updated")
    return True
def create_vpc(connection, module, cidr_block, tenancy):
    """Create a new VPC (or exit immediately in check mode) and return its VpcId."""
    try:
        if module.check_mode:
            # Nothing is created in check mode; report the pending change and stop.
            module.exit_json(changed=True)
        vpc_obj = connection.create_vpc(CidrBlock=cidr_block, InstanceTenancy=tenancy)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, "Failed to create the VPC")

    # wait for vpc to exist
    try:
        connection.get_waiter('vpc_exists').wait(VpcIds=[vpc_obj['Vpc']['VpcId']])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be created.".format(vpc_obj['Vpc']['VpcId']))

    return vpc_obj['Vpc']['VpcId']
def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value):
    """Poll describe_vpc_attribute until the attribute equals expected_value.

    Polls every 3 seconds for up to 300 seconds, then fails the module.
    """
    deadline = time() + 300
    while time() < deadline:
        response = connection.describe_vpc_attribute(
            Attribute=attribute,
            VpcId=vpc_id
        )
        # The response key is the CamelCase form of the attribute name.
        current_value = response['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value']
        if current_value == expected_value:
            return
        sleep(3)
    module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute))
def get_cidr_network_bits(module, cidr_block):
    """Normalise a list of CIDR strings so that only network bits are set.

    CIDRs with host bits set are corrected (a warning is issued); anything
    not in 'addr/prefix' form is passed through untouched so AWS can reject it.
    """
    fixed_cidrs = []
    for cidr in cidr_block:
        parts = cidr.split('/')
        if len(parts) != 2:
            # let AWS handle invalid CIDRs
            fixed_cidrs.append(cidr)
            continue
        # This is an IPv4 CIDR that may or may not have host bits set;
        # reduce it to its network address.
        valid_cidr = to_subnet(parts[0], parts[1])
        if cidr != valid_cidr:
            module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
                        "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr))
        fixed_cidrs.append(valid_cidr)
    return fixed_cidrs
def main():
    """Entry point for the ec2_vpc_net module.

    Creates/updates a VPC (state=present) — CIDR associations, IPv6 CIDR,
    DHCP options, tags, DNS attributes — or deletes it (state=absent), then
    exits via module.exit_json with the final VPC description.
    """
    argument_spec = dict(
        name=dict(required=True),
        cidr_block=dict(type='list', required=True),
        ipv6_cidr=dict(type='bool', default=False),
        tenancy=dict(choices=['default', 'dedicated'], default='default'),
        dns_support=dict(type='bool', default=True),
        dns_hostnames=dict(type='bool', default=True),
        dhcp_opts_id=dict(),
        tags=dict(type='dict', aliases=['resource_tags']),
        state=dict(choices=['present', 'absent'], default='present'),
        multi_ok=dict(type='bool', default=False),
        purge_cidrs=dict(type='bool', default=False),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    name = module.params.get('name')
    # Normalise CIDRs up front so host-bit variants compare equal later.
    cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
    ipv6_cidr = module.params.get('ipv6_cidr')
    purge_cidrs = module.params.get('purge_cidrs')
    tenancy = module.params.get('tenancy')
    dns_support = module.params.get('dns_support')
    dns_hostnames = module.params.get('dns_hostnames')
    dhcp_id = module.params.get('dhcp_opts_id')
    tags = module.params.get('tags')
    state = module.params.get('state')
    multi = module.params.get('multi_ok')

    changed = False

    connection = module.client(
        'ec2',
        retry_decorator=AWSRetry.jittered_backoff(
            retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']
        )
    )

    if dns_hostnames and not dns_support:
        module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')

    if state == 'present':

        # Check if VPC exists
        vpc_id = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_id is None:
            vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
            changed = True

        vpc_obj = get_vpc(module, connection, vpc_id)

        # Map currently-associated CIDR -> association id, ignoring
        # associations that are already disassociated.
        associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
                                if cidr['CidrBlockState']['State'] != 'disassociated')
        to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
        to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs if cidr not in cidr_block]
        expected_cidrs = [cidr for cidr in associated_cidrs if associated_cidrs[cidr] not in to_remove] + to_add

        if len(cidr_block) > 1:
            for cidr in to_add:
                changed = True
                try:
                    # NOTE(review): this association is not guarded by
                    # module.check_mode, so check mode still calls the API
                    # here — TODO confirm whether that is intended.
                    connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    # BUG FIX: this message previously formatted ipv6_cidr (a bool)
                    # instead of the IPv4 CIDR that actually failed to associate.
                    module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(cidr))
        if ipv6_cidr:
            if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
                # Only one IPv6 block may be associated; warn rather than fail.
                module.warn("Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}".format(
                    vpc_id,
                    vpc_obj['Ipv6CidrBlockAssociationSet'][0]['Ipv6CidrBlock']))
            else:
                try:
                    connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id)
                    changed = True
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))

        if purge_cidrs:
            for association_id in to_remove:
                changed = True
                try:
                    connection.disassociate_vpc_cidr_block(AssociationId=association_id)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
                                            "are associated with the CIDR block before you can disassociate it.".format(association_id))

        if dhcp_id is not None:
            try:
                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
                    changed = True
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, "Failed to update DHCP options")

        if tags is not None or name is not None:
            try:
                if update_vpc_tags(connection, module, vpc_id, tags, name):
                    changed = True
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Failed to update tags")

        current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
        current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
        if current_dns_enabled != dns_support:
            changed = True
            if not module.check_mode:
                try:
                    connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support})
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, "Failed to update enabled dns support attribute")
        if current_dns_hostnames != dns_hostnames:
            changed = True
            if not module.check_mode:
                try:
                    connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames})
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute")

        # wait for associated cidrs to match
        if to_add or to_remove:
            try:
                connection.get_waiter('vpc_available').wait(
                    VpcIds=[vpc_id],
                    Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
                )
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, "Failed to wait for CIDRs to update")

        # try to wait for enableDnsSupport and enableDnsHostnames to match
        wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
        wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)

        final_state = camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id))
        final_state['tags'] = boto3_tag_list_to_ansible_dict(final_state.get('tags', []))
        final_state['id'] = final_state.pop('vpc_id')

        module.exit_json(changed=changed, vpc=final_state)

    elif state == 'absent':

        # Check if VPC exists
        vpc_id = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_id is not None:
            try:
                if not module.check_mode:
                    connection.delete_vpc(VpcId=vpc_id)
                changed = True
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
                                            "and/or ec2_vpc_route_table modules to ensure the other components are absent.".format(vpc_id))

        module.exit_json(changed=changed, vpc={})
if __name__ == '__main__':
main()

@ -1,604 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vpc_subnet
short_description: Manage subnets in AWS virtual private clouds
description:
- Manage subnets in AWS virtual private clouds.
version_added: "2.0"
author:
- Robert Estelle (@erydo)
- Brad Davidson (@brandond)
requirements: [ boto3 ]
options:
az:
description:
- "The availability zone for the subnet."
type: str
cidr:
description:
- "The CIDR block for the subnet. E.g. 192.0.2.0/24."
type: str
required: true
ipv6_cidr:
description:
- "The IPv6 CIDR block for the subnet. The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range."
- "Required if I(assign_instances_ipv6=true)"
version_added: "2.5"
type: str
tags:
description:
- "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
aliases: [ 'resource_tags' ]
type: dict
state:
description:
- "Create or remove the subnet."
default: present
choices: [ 'present', 'absent' ]
type: str
vpc_id:
description:
- "VPC ID of the VPC in which to create or delete the subnet."
required: true
type: str
map_public:
description:
- "Specify C(yes) to indicate that instances launched into the subnet should be assigned public IP address by default."
type: bool
default: 'no'
version_added: "2.4"
assign_instances_ipv6:
description:
- "Specify C(yes) to indicate that instances launched into the subnet should be automatically assigned an IPv6 address."
type: bool
default: false
version_added: "2.5"
wait:
description:
- "When I(wait=true) and I(state=present), module will wait for subnet to be in available state before continuing."
type: bool
default: true
version_added: "2.5"
wait_timeout:
description:
- "Number of seconds to wait for subnet to become available I(wait=True)."
default: 300
version_added: "2.5"
type: int
purge_tags:
description:
- Whether or not to remove tags that do not appear in the I(tags) list.
type: bool
default: true
version_added: "2.5"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create subnet for database servers
ec2_vpc_subnet:
state: present
vpc_id: vpc-123456
cidr: 10.0.1.16/28
tags:
Name: Database Subnet
register: database_subnet
- name: Remove subnet for database servers
ec2_vpc_subnet:
state: absent
vpc_id: vpc-123456
cidr: 10.0.1.16/28
- name: Create subnet with IPv6 block assigned
ec2_vpc_subnet:
state: present
vpc_id: vpc-123456
cidr: 10.1.100.0/24
ipv6_cidr: 2001:db8:0:102::/64
- name: Remove IPv6 block assigned to subnet
ec2_vpc_subnet:
state: present
vpc_id: vpc-123456
cidr: 10.1.100.0/24
ipv6_cidr: ''
'''
RETURN = '''
subnet:
description: Dictionary of subnet values
returned: I(state=present)
type: complex
contains:
id:
description: Subnet resource id
returned: I(state=present)
type: str
sample: subnet-b883b2c4
cidr_block:
description: The IPv4 CIDR of the Subnet
returned: I(state=present)
type: str
sample: "10.0.0.0/16"
ipv6_cidr_block:
description: The IPv6 CIDR block actively associated with the Subnet
returned: I(state=present)
type: str
sample: "2001:db8:0:102::/64"
availability_zone:
description: Availability zone of the Subnet
returned: I(state=present)
type: str
sample: us-east-1a
state:
description: state of the Subnet
returned: I(state=present)
type: str
sample: available
tags:
description: tags attached to the Subnet, includes name
returned: I(state=present)
type: dict
sample: {"Name": "My Subnet", "env": "staging"}
map_public_ip_on_launch:
description: whether public IP is auto-assigned to new instances
returned: I(state=present)
type: bool
sample: false
assign_ipv6_address_on_creation:
description: whether IPv6 address is auto-assigned to new instances
returned: I(state=present)
type: bool
sample: false
vpc_id:
description: the id of the VPC where this Subnet exists
returned: I(state=present)
type: str
sample: vpc-67236184
available_ip_address_count:
description: number of available IPv4 addresses
returned: I(state=present)
type: str
sample: 251
default_for_az:
description: indicates whether this is the default Subnet for this Availability Zone
returned: I(state=present)
type: bool
sample: false
ipv6_association_id:
description: The IPv6 association ID for the currently associated CIDR
returned: I(state=present)
type: str
sample: subnet-cidr-assoc-b85c74d2
ipv6_cidr_block_association_set:
description: An array of IPv6 cidr block association set information.
returned: I(state=present)
type: complex
contains:
association_id:
description: The association ID
returned: always
type: str
ipv6_cidr_block:
description: The IPv6 CIDR block that is associated with the subnet.
returned: always
type: str
ipv6_cidr_block_state:
description: A hash/dict that contains a single item. The state of the cidr block association.
returned: always
type: dict
contains:
state:
description: The CIDR block association state.
returned: always
type: str
'''
import time
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils._text import to_text
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.waiters import get_waiter
from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, ansible_dict_to_boto3_tag_list,
camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags, AWSRetry)
def get_subnet_info(subnet):
    """Convert a describe_subnets / create_subnet response into the module's
    snake_case result shape (recursively for a 'Subnets' list)."""
    # A DescribeSubnets response: convert every entry.
    if 'Subnets' in subnet:
        return [get_subnet_info(entry) for entry in subnet['Subnets']]

    # A CreateSubnet response wraps the data under 'Subnet'.
    raw = subnet['Subnet'] if 'Subnet' in subnet else subnet
    info = camel_dict_to_snake_dict(raw)

    info['tags'] = boto3_tag_list_to_ansible_dict(info['tags']) if 'tags' in info else dict()

    # Expose the id under the module's conventional 'id' key.
    if 'subnet_id' in info:
        info['id'] = info.pop('subnet_id')

    # Surface the active IPv6 association (if any) as flat keys; when
    # several entries qualify, the last one wins, as before.
    info['ipv6_cidr_block'] = ''
    info['ipv6_association_id'] = ''
    for item in info.get('ipv6_cidr_block_association_set') or []:
        if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'):
            info['ipv6_cidr_block'] = item['ipv6_cidr_block']
            info['ipv6_association_id'] = item['association_id']

    return info
@AWSRetry.exponential_backoff()
def describe_subnets_with_backoff(client, **params):
    """describe_subnets wrapped in exponential backoff to ride out transient EC2 errors."""
    return client.describe_subnets(**params)
def waiter_params(module, params, start_time):
    """Size a WaiterConfig from the remaining wait_timeout for old botocore.

    botocore >= 1.7.0 handles waiter configuration natively, so params are
    returned unchanged there; otherwise a 5-second-delay WaiterConfig whose
    attempts cover the time left until start_time + wait_timeout is injected.
    """
    if module.botocore_at_least("1.7.0"):
        return params
    remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time())
    params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5}
    return params
def handle_waiter(conn, module, waiter_name, params, start_time):
    """Run the named EC2 waiter, converting any failure into a module error."""
    try:
        waiter = get_waiter(conn, waiter_name)
        waiter.wait(**waiter_params(module, params, start_time))
    except botocore.exceptions.WaiterError as e:
        # The waiter gave up (timeout or terminal failure state).
        module.fail_json_aws(e, "Failed to wait for updates to complete")
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, "An exception happened while trying to wait for updates")
def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, az=None, start_time=None):
    """Create a subnet and optionally wait for it to become available.

    :param conn: boto3 EC2 client
    :param module: AnsibleAWSModule (reads the 'wait' parameter)
    :param vpc_id: VPC in which to create the subnet
    :param cidr: IPv4 CIDR block for the subnet
    :param ipv6_cidr: optional IPv6 CIDR block to assign at creation
    :param az: optional availability zone
    :param start_time: epoch seconds used to budget the overall wait timeout
    :return: the new subnet as a snake_case dict (see get_subnet_info)
    """
    # NOTE: the original also read module.params['wait_timeout'] into an
    # unused local; the timeout budget is actually applied via waiter_params.
    wait = module.params['wait']
    params = dict(VpcId=vpc_id, CidrBlock=cidr)
    if ipv6_cidr:
        params['Ipv6CidrBlock'] = ipv6_cidr
    if az:
        params['AvailabilityZone'] = az
    try:
        subnet = get_subnet_info(conn.create_subnet(**params))
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't create subnet")
    # Sometimes AWS takes its time to create a subnet and so using
    # new subnets's id to do things like create tags results in
    # exception.
    if wait and subnet.get('state') != 'available':
        # First wait for the subnet to exist at all, then for it to be usable.
        handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
        try:
            conn.get_waiter('subnet_available').wait(
                **waiter_params(module, {'SubnetIds': [subnet['id']]}, start_time)
            )
            subnet['state'] = 'available'
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, "Create subnet action timed out waiting for subnet to become available")
    return subnet
def ensure_tags(conn, module, subnet, tags, purge_tags, start_time):
    """Converge the subnet's EC2 tags to the desired `tags` dict.

    :param conn: boto3 EC2 client
    :param module: AnsibleAWSModule (check_mode and 'wait' are honoured)
    :param subnet: subnet dict as returned by get_subnet_info
    :param tags: desired tags (string keys/values)
    :param purge_tags: when true, existing tags not in `tags` are deleted
    :param start_time: epoch seconds used to budget the overall wait timeout
    :return: True if any tag was added, changed or deleted
    """
    changed = False
    filters = ansible_dict_to_boto3_filter_list({'resource-id': subnet['id'], 'resource-type': 'subnet'})
    try:
        cur_tags = conn.describe_tags(Filters=filters)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't describe tags")
    # compare_aws_tags yields (tags to add/change, tag keys to remove).
    to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
    if to_update:
        try:
            if not module.check_mode:
                # Retry InvalidSubnetID.NotFound: a freshly created subnet may
                # not be visible to the tagging API yet.
                AWSRetry.exponential_backoff(
                    catch_extra_error_codes=['InvalidSubnetID.NotFound']
                )(conn.create_tags)(
                    Resources=[subnet['id']],
                    Tags=ansible_dict_to_boto3_tag_list(to_update)
                )
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't create tags")
    if to_delete:
        try:
            if not module.check_mode:
                # DeleteTags takes {'Key': k} entries; omitting 'Value'
                # deletes the tag regardless of its current value.
                tags_list = []
                for key in to_delete:
                    tags_list.append({'Key': key})
                AWSRetry.exponential_backoff(
                    catch_extra_error_codes=['InvalidSubnetID.NotFound']
                )(conn.delete_tags)(Resources=[subnet['id']], Tags=tags_list)
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't delete tags")
    if module.params['wait'] and not module.check_mode:
        # Wait for tags to be updated
        filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()]
        handle_waiter(conn, module, 'subnet_exists',
                      {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
    return changed
def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time):
    """Set the subnet's MapPublicIpOnLaunch attribute to `map_public`.

    No-op in check mode. start_time is accepted for signature parity with
    the other ensure_* helpers but is not used here.
    """
    if check_mode:
        return
    try:
        conn.modify_subnet_attribute(SubnetId=subnet['id'], MapPublicIpOnLaunch={'Value': map_public})
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time):
    """Set the subnet's AssignIpv6AddressOnCreation attribute.

    No-op in check mode. start_time is accepted for signature parity with
    the other ensure_* helpers but is not used here.
    """
    if check_mode:
        return
    try:
        conn.modify_subnet_attribute(SubnetId=subnet['id'], AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6})
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
def disassociate_ipv6_cidr(conn, module, subnet, start_time):
    """Remove the subnet's current IPv6 CIDR block association.

    AssignIpv6AddressOnCreation is switched off first — presumably EC2
    requires that before a disassociation can succeed (TODO confirm against
    the EC2 API docs). When 'wait' is set, blocks until the association
    reports state 'disassociated'.
    """
    if subnet.get('assign_ipv6_address_on_creation'):
        ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time)
    try:
        conn.disassociate_subnet_cidr_block(AssociationId=subnet['ipv6_association_id'])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}"
                             .format(subnet['ipv6_association_id'], subnet['id']))
    # Wait for cidr block to be disassociated
    if module.params['wait']:
        filters = ansible_dict_to_boto3_filter_list(
            {'ipv6-cidr-block-association.state': ['disassociated'],
             'vpc-id': subnet['vpc_id']}
        )
        handle_waiter(conn, module, 'subnet_exists',
                      {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time):
    """Reconcile the subnet's IPv6 CIDR association with the requested value.

    Disassociates an unwanted IPv6 CIDR, checks the requested CIDR does not
    conflict with another subnet in the VPC, then associates it and folds the
    fresh association into the `subnet` dict (mutated in place).

    :param ipv6_cidr: desired IPv6 CIDR block ('' / falsy means none)
    :param check_mode: when true, report changes without calling modify APIs
    :return: True if an (dis)association was made or would be made
    """
    wait = module.params['wait']
    changed = False
    # Requested no IPv6 but one is associated: drop it.
    if subnet['ipv6_association_id'] and not ipv6_cidr:
        if not check_mode:
            disassociate_ipv6_cidr(conn, module, subnet, start_time)
        changed = True
    if ipv6_cidr:
        filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr,
                                                     'vpc-id': subnet['vpc_id']})
        try:
            check_subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't get subnet info")
        if check_subnets and check_subnets[0]['ipv6_cidr_block']:
            module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr))
        # A different CIDR is currently associated: replace it.
        if subnet['ipv6_association_id']:
            if not check_mode:
                disassociate_ipv6_cidr(conn, module, subnet, start_time)
            changed = True
        try:
            if not check_mode:
                associate_resp = conn.associate_subnet_cidr_block(SubnetId=subnet['id'], Ipv6CidrBlock=ipv6_cidr)
            # An association is (or would be) made, so report 'changed' in
            # check mode too — consistent with the disassociation path above.
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id']))
        else:
            if not check_mode:
                if wait:
                    filters = ansible_dict_to_boto3_filter_list(
                        {'ipv6-cidr-block-association.state': ['associated'],
                         'vpc-id': subnet['vpc_id']}
                    )
                    handle_waiter(conn, module, 'subnet_exists',
                                  {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
                # Fold the new association into the cached subnet dict. This
                # must stay under 'not check_mode': associate_resp is unbound
                # in check mode (the original raised NameError here).
                if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'):
                    subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId']
                    subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
                    if subnet['ipv6_cidr_block_association_set']:
                        subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])
                    else:
                        subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']))
    return changed
def get_matching_subnet(conn, module, vpc_id, cidr):
    """Return the subnet in `vpc_id` with the given IPv4 CIDR, or None."""
    lookup = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr})
    try:
        candidates = get_subnet_info(describe_subnets_with_backoff(conn, Filters=lookup))
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err:
        module.fail_json_aws(err, msg="Couldn't get matching subnet")
    return candidates[0] if candidates else None
def ensure_subnet_present(conn, module):
    """Create/update the subnet described by the module parameters.

    Looks up the subnet by vpc_id + cidr (creating it if missing), then
    converges ipv6_cidr, map_public, assign_instances_ipv6 and tags.

    :return: {'changed': bool, 'subnet': dict} — 'subnet' is {} in check
        mode when the subnet does not exist yet.
    """
    subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
    changed = False
    # Initialize start so max time does not exceed the specified wait_timeout for multiple operations
    start_time = time.time()
    if subnet is None:
        if not module.check_mode:
            subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'],
                                   ipv6_cidr=module.params['ipv6_cidr'], az=module.params['az'], start_time=start_time)
        changed = True
        # Subnet will be None when check_mode is true
        if subnet is None:
            return {
                'changed': changed,
                'subnet': {}
            }
    if module.params['wait']:
        handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
    if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'):
        if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time):
            changed = True
    if module.params['map_public'] != subnet['map_public_ip_on_launch']:
        ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time)
        changed = True
    if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'):
        ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time)
        changed = True
    if module.params['tags'] != subnet['tags']:
        # Tag values are coerced to text so comparisons against the API's
        # string tag values behave consistently.
        stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items())
        if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time):
            changed = True
    # Re-read so the returned subnet reflects all modifications above.
    subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
    if not module.check_mode and module.params['wait']:
        # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation
        # so we only wait for those if necessary just before returning the subnet
        subnet = ensure_final_subnet(conn, module, subnet, start_time)
    return {
        'changed': changed,
        'subnet': subnet
    }
def ensure_final_subnet(conn, module, subnet, start_time):
    """Poll until map_public / assign_instances_ipv6 reflect the request.

    GET calls for these attributes are not monotonic immediately after a
    modify, so wait on the relevant attribute waiters and re-read the
    subnet, retrying up to 30 times with a 5 second pause between reads.
    Returns the (re-fetched) subnet dict.
    """
    for _attempt in range(30):
        map_public_matches = module.params['map_public'] == subnet['map_public_ip_on_launch']
        if not map_public_matches:
            waiter = 'subnet_has_map_public' if module.params['map_public'] else 'subnet_no_map_public'
            handle_waiter(conn, module, waiter, {'SubnetIds': [subnet['id']]}, start_time)
        ipv6_matches = module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation')
        if not ipv6_matches:
            waiter = 'subnet_has_assign_ipv6' if module.params['assign_instances_ipv6'] else 'subnet_no_assign_ipv6'
            handle_waiter(conn, module, waiter, {'SubnetIds': [subnet['id']]}, start_time)
        if map_public_matches and ipv6_matches:
            break
        time.sleep(5)
        subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
    return subnet
def ensure_subnet_absent(conn, module):
    """Delete the subnet matching vpc_id/cidr if it exists.

    :return: {'changed': bool}

    In check mode nothing is deleted and no waiter runs — the original ran
    the 'subnet_deleted' waiter even in check mode, which waited on a subnet
    that was never deleted and therefore always timed out and failed.
    """
    subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
    if subnet is None:
        return {'changed': False}
    try:
        if not module.check_mode:
            conn.delete_subnet(SubnetId=subnet['id'])
            if module.params['wait']:
                handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time())
        return {'changed': True}
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't delete subnet")
def main():
    """Module entry point: parse arguments and converge the subnet state."""
    argument_spec = dict(
        az=dict(default=None, required=False),
        cidr=dict(required=True),
        ipv6_cidr=dict(default='', required=False),
        state=dict(default='present', choices=['present', 'absent']),
        tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']),
        vpc_id=dict(required=True),
        map_public=dict(default=False, required=False, type='bool'),
        assign_instances_ipv6=dict(default=False, required=False, type='bool'),
        wait=dict(type='bool', default=True),
        wait_timeout=dict(type='int', default=300, required=False),
        purge_tags=dict(default=True, type='bool')
    )

    required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])]

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)

    # required_if only fires when assign_instances_ipv6 is the literal True;
    # this guard also rejects an empty-string ipv6_cidr.
    if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'):
        module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string")

    if not module.botocore_at_least("1.7.0"):
        module.warn("botocore >= 1.7.0 is required to use wait_timeout for custom wait times")

    connection = module.client('ec2')

    state = module.params.get('state')

    try:
        if state == 'present':
            result = ensure_subnet_present(connection, module)
        elif state == 'absent':
            result = ensure_subnet_absent(connection, module)
    # Catch BotoCoreError as well as ClientError, matching the handling used
    # everywhere else in this module, so connection-level failures produce
    # fail_json output instead of an unhandled traceback.
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e)

    module.exit_json(**result)


if __name__ == '__main__':
    main()