Fix inventory plugin cache + add tests (#38229)

* Fix setting the cache when refresh_cache or --flush-cache are used

* Use jsonify function that handles datetime objects in jsonfile cache plugin

* Don't access self._options directly

* Add initial integration tests for aws_ec2 inventory plugin

* Add CI alias

* Fix and add a few more unit tests

* Add integration tests for constructed

* Fix typo

* Use inventory config templates

* Collect all non-terminated instances by default

* Create separate playbook for setting up the VPC, subnet, security group, and finding an image for the host

Create a separate playbook for removing the resources

* Allow easier grouping by region and add an example

* Use a unified JSON encoder/decoder that can handle unsafe and vaulted values
Authored by Sloane Hertel, committed by Ryan Brown
parent 0ad4b7b785
commit cba64f5869

@@ -49,7 +49,7 @@ try:
except ImportError:
import json
from ansible.parsing.utils.jsonify import jsonify
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
from ansible.plugins.cache import BaseFileCacheModule
@@ -61,8 +61,8 @@ class CacheModule(BaseFileCacheModule):
def _load(self, filepath):
# Valid JSON is always UTF-8 encoded.
with codecs.open(filepath, 'r', encoding='utf-8') as f:
return json.load(f)
return json.load(f, cls=AnsibleJSONDecoder)
def _dump(self, value, filepath):
with codecs.open(filepath, 'w', encoding='utf-8') as f:
f.write(jsonify(value, format=True))
f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
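The switch from `jsonify` to `AnsibleJSONEncoder`/`AnsibleJSONDecoder` matters because boto3 responses contain `datetime` objects (for example `LaunchTime`), which plain `json.dumps` cannot serialize, and the commit message above also wants unsafe and vaulted values to survive the round trip. A minimal sketch of the datetime part, using a hypothetical `DatetimeJSONEncoder` rather than Ansible's actual encoder:

```python
import datetime
import json


class DatetimeJSONEncoder(json.JSONEncoder):
    """Serialize datetimes as ISO 8601 strings; plain json.dumps raises TypeError on them."""

    def default(self, o):
        if isinstance(o, datetime.datetime):
            return o.isoformat()
        return super(DatetimeJSONEncoder, self).default(o)


host = {'instance_id': 'i-0123456789abcdef0',
        'launch_time': datetime.datetime(2018, 4, 2, 12, 30)}

# This is what the jsonfile cache plugin effectively does on _dump()/_load().
cached = json.dumps(host, cls=DatetimeJSONEncoder, sort_keys=True, indent=4)
print(json.loads(cached)['launch_time'])  # -> '2018-04-02T12:30:00'
```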

@@ -94,6 +94,9 @@ keyed_groups:
# create a group for each value of the Application tag
- key: tag.Application
separator: ''
# create a group per region e.g. aws_region_us_east_2
- key: placement.region
prefix: aws_region
'''
from ansible.errors import AnsibleError, AnsibleParserError
@@ -307,6 +310,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
for connection, region in self._boto3_conn(regions):
try:
# By default find non-terminated/terminating instances
if not any([f['Name'] == 'instance-state-name' for f in filters]):
filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
paginator = connection.get_paginator('describe_instances')
reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
instances = []
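The guard above only appends the default `instance-state-name` filter when the user has not supplied one, so an explicit state filter in the inventory config still wins. A small sketch of that behavior, assuming boto3-style filter dicts:

```python
# Sketch of the default-filter guard used before calling describe_instances.
def apply_default_state_filter(filters):
    """Append a non-terminated instance-state-name filter unless one is already present."""
    if not any(f['Name'] == 'instance-state-name' for f in filters):
        filters.append({'Name': 'instance-state-name',
                        'Values': ['running', 'pending', 'stopping', 'stopped']})
    return filters


# User filters only by tag: the default state filter is appended.
print(apply_default_state_filter([{'Name': 'tag:Name', 'Values': ['web']}]))

# User already filters on instance state: nothing is added.
print(apply_default_state_filter([{'Name': 'instance-state-name', 'Values': ['stopped']}]))
```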
@@ -419,6 +425,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
# Allow easier grouping by region
host['placement']['region'] = host['placement']['availability_zone'][:-1]
if not hostname:
continue
self.inventory.add_host(hostname, group=group)
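The region is derived from the availability zone name, which is the region plus a one-letter suffix, so slicing off the last character is enough. A tiny sketch:

```python
# Sketch: 'us-east-2b' is region 'us-east-2' plus the zone letter 'b'.
host = {'placement': {'availability_zone': 'us-east-2b'}}
host['placement']['region'] = host['placement']['availability_zone'][:-1]

assert host['placement']['region'] == 'us-east-2'

# Combined with the documented keyed_groups entry (key: placement.region,
# prefix: aws_region), this host ends up in a group like aws_region_us_east_2.
```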
@@ -427,29 +436,26 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
# Use constructed if applicable
strict = self._options.get('strict', False)
strict = self.get_option('strict')
# Composed variables
if self._options.get('compose'):
self._set_composite_vars(self._options.get('compose'), host, hostname, strict=strict)
self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
# Complex groups based on jinja2 conditionals; hosts that meet the conditional are added to the group
if self._options.get('groups'):
self._add_host_to_composed_groups(self._options.get('groups'), host, hostname, strict=strict)
self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
if self._options.get('keyed_groups'):
self._add_host_to_keyed_groups(self._options.get('keyed_groups'), host, hostname, strict=strict)
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
def _set_credentials(self):
'''
:param config_data: contents of the inventory config file
'''
self.boto_profile = self._options.get('boto_profile')
self.aws_access_key_id = self._options.get('aws_access_key_id')
self.aws_secret_access_key = self._options.get('aws_secret_access_key')
self.aws_security_token = self._options.get('aws_security_token')
self.boto_profile = self.get_option('boto_profile')
self.aws_access_key_id = self.get_option('aws_access_key_id')
self.aws_secret_access_key = self.get_option('aws_secret_access_key')
self.aws_security_token = self.get_option('aws_security_token')
if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
session = botocore.session.get_session()
@@ -525,13 +531,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
# get user specifications
regions, filters, hostnames, strict_permissions = self._get_query_options(config_data)
cache_key = self.get_cache_key(path)
# false when refresh_cache or --flush-cache is used
if cache:
# get the user-specified directive
cache = self._options.get('cache')
cache_key = self.get_cache_key(path)
else:
cache_key = None
cache = self.get_option('cache')
# Generate inventory
formatted_inventory = {}
@@ -550,5 +554,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self._populate(results, hostnames)
formatted_inventory = self._format_inventory(results, hostnames)
if cache_needs_update:
# If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
# when the user is using caching, update the cached inventory
if cache_needs_update or (not cache and self.get_option('cache')):
self.cache.set(cache_key, formatted_inventory)
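This is the core of the cache fix: the cache key is now computed unconditionally, and the cache is written back either when the cached entry was missing or expired, or when the user has caching enabled but a refresh was forced (the `cache` argument to `parse()` is False for `--flush-cache`/`meta: refresh_inventory`). A hedged sketch of the resulting decision:

```python
# Sketch of the cache-update decision after the fix. `cache` is the flag passed
# to parse() (False when a refresh is forced) and `cache_option` is the user's
# `cache:` setting from the inventory config.
def should_write_cache(cache, cache_option, cache_needs_update):
    """Write when the entry was stale/missing, or when a refresh was forced while caching is enabled."""
    return cache_needs_update or (not cache and cache_option)


# Forced refresh with caching enabled: fresh results are written back (the bug this fixes).
assert should_write_cache(cache=False, cache_option=True, cache_needs_update=False)

# Caching disabled entirely: nothing is written.
assert not should_write_cache(cache=False, cache_option=False, cache_needs_update=False)

# Normal run whose cached entry expired: written, as before.
assert should_write_cache(cache=True, cache_option=True, cache_needs_update=True)
```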

@@ -0,0 +1,2 @@
cloud/aws
posix/ci/cloud/group4/aws

@@ -0,0 +1,11 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
vars:
template_name: "../templates/{{ template | default('inventory.yml') }}"
tasks:
- name: write inventory config file
copy:
dest: ../test.aws_ec2.yml
content: "{{ lookup('template', template_name) }}"

@@ -0,0 +1,9 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
tasks:
- name: write inventory config file
copy:
dest: ../test.aws_ec2.yml
content: ""

@@ -0,0 +1,63 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
tasks:
- block:
# Create VPC, subnet, security group, and find image_id to create instance
- include_tasks: setup.yml
- name: assert group was populated with inventory but is empty
assert:
that:
- "'aws_ec2' in groups"
- "not groups.aws_ec2"
# Create new host, add it to inventory and then terminate it without updating the cache
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: '{{ aws_access_key }}'
aws_secret_key: '{{ aws_secret_key }}'
security_token: '{{ security_token }}'
region: '{{ aws_region }}'
no_log: yes
- name: create a new host
ec2:
image: '{{ image_id }}'
exact_count: 1
count_tag:
Name: '{{ resource_prefix }}'
instance_tags:
Name: '{{ resource_prefix }}'
instance_type: t2.micro
wait: yes
group_id: '{{ sg_id }}'
vpc_subnet_id: '{{ subnet_id }}'
<<: *aws_connection_info
register: setup_instance
- meta: refresh_inventory
always:
- name: remove setup ec2 instance
ec2:
instance_type: t2.micro
instance_ids: '{{ setup_instance.instance_ids }}'
state: absent
wait: yes
instance_tags:
Name: '{{ resource_prefix }}'
group_id: '{{ sg_id }}'
vpc_subnet_id: '{{ subnet_id }}'
<<: *aws_connection_info
ignore_errors: yes
when: setup_instance is defined
- include_tasks: tear_down.yml

@@ -0,0 +1,62 @@
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: '{{ aws_access_key }}'
aws_secret_key: '{{ aws_secret_key }}'
security_token: '{{ security_token }}'
region: '{{ aws_region }}'
no_log: yes
- name: get image ID to create an instance
ec2_ami_facts:
filters:
architecture: x86_64
owner-id: '125523088429'
virtualization-type: hvm
root-device-type: ebs
name: 'Fedora-Atomic-27*'
<<: *aws_connection_info
register: fedora_images
- set_fact:
image_id: '{{ fedora_images.images.0.image_id }}'
- name: create a VPC to work in
ec2_vpc_net:
cidr_block: 10.10.0.0/24
state: present
name: '{{ resource_prefix }}_setup'
resource_tags:
Name: '{{ resource_prefix }}_setup'
<<: *aws_connection_info
register: setup_vpc
- set_fact:
vpc_id: '{{ setup_vpc.vpc.id }}'
- name: create a subnet to use for creating an ec2 instance
ec2_vpc_subnet:
az: '{{ aws_region }}a'
tags: '{{ resource_prefix }}_setup'
vpc_id: '{{ setup_vpc.vpc.id }}'
cidr: 10.10.0.0/24
state: present
resource_tags:
Name: '{{ resource_prefix }}_setup'
<<: *aws_connection_info
register: setup_subnet
- set_fact:
subnet_id: '{{ setup_subnet.subnet.id }}'
- name: create a security group to use for creating an ec2 instance
ec2_group:
name: '{{ resource_prefix }}_setup'
description: 'created by Ansible integration tests'
state: present
vpc_id: '{{ setup_vpc.vpc.id }}'
<<: *aws_connection_info
register: setup_sg
- set_fact:
sg_id: '{{ setup_sg.group_id }}'

@@ -0,0 +1,39 @@
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: '{{ aws_access_key }}'
aws_secret_key: '{{ aws_secret_key }}'
security_token: '{{ security_token }}'
region: '{{ aws_region }}'
no_log: yes
- name: remove setup security group
ec2_group:
name: '{{ resource_prefix }}_setup'
description: 'created by Ansible integration tests'
state: absent
vpc_id: '{{ vpc_id }}'
<<: *aws_connection_info
ignore_errors: yes
- name: remove setup subnet
ec2_vpc_subnet:
az: '{{ aws_region }}a'
tags: '{{ resource_prefix }}_setup'
vpc_id: '{{ vpc_id }}'
cidr: 10.10.0.0/24
state: absent
resource_tags:
Name: '{{ resource_prefix }}_setup'
<<: *aws_connection_info
ignore_errors: yes
- name: remove setup VPC
ec2_vpc_net:
cidr_block: 10.10.0.0/24
state: absent
name: '{{ resource_prefix }}_setup'
resource_tags:
Name: '{{ resource_prefix }}_setup'
<<: *aws_connection_info
ignore_errors: yes

@@ -0,0 +1,9 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
tasks:
- name: assert inventory was not populated by aws_ec2 inventory plugin
assert:
that:
- "'aws_ec2' not in groups"

@@ -0,0 +1,18 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
tasks:
- name: assert cache was used to populate inventory
assert:
that:
- "'aws_ec2' in groups"
- "groups.aws_ec2 | length == 1"
- meta: refresh_inventory
- name: assert refresh_inventory updated the cache
assert:
that:
- "'aws_ec2' in groups"
- "not groups.aws_ec2"

@@ -0,0 +1,90 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
tasks:
- block:
# Create VPC, subnet, security group, and find image_id to create instance
- include_tasks: setup.yml
- name: assert group was populated with inventory but is empty
assert:
that:
- "'aws_ec2' in groups"
- "not groups.aws_ec2"
# Create new host, refresh inventory, remove host, refresh inventory
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: '{{ aws_access_key }}'
aws_secret_key: '{{ aws_secret_key }}'
security_token: '{{ security_token }}'
region: '{{ aws_region }}'
no_log: yes
- name: create a new host
ec2:
image: '{{ image_id }}'
exact_count: 1
count_tag:
Name: '{{ resource_prefix }}'
instance_tags:
Name: '{{ resource_prefix }}'
instance_type: t2.micro
wait: yes
group_id: '{{ sg_id }}'
vpc_subnet_id: '{{ subnet_id }}'
<<: *aws_connection_info
register: setup_instance
- meta: refresh_inventory
- name: assert group was populated with inventory and is no longer empty
assert:
that:
- "'aws_ec2' in groups"
- "groups.aws_ec2 | length == 1"
- "groups.aws_ec2.0 == '{{ resource_prefix }}'"
- name: remove setup ec2 instance
ec2:
instance_type: t2.micro
instance_ids: '{{ setup_instance.instance_ids }}'
state: absent
wait: yes
instance_tags:
Name: '{{ resource_prefix }}'
group_id: '{{ sg_id }}'
vpc_subnet_id: '{{ subnet_id }}'
<<: *aws_connection_info
- meta: refresh_inventory
- name: assert group was populated with inventory but is empty
assert:
that:
- "'aws_ec2' in groups"
- "not groups.aws_ec2"
always:
- name: remove setup ec2 instance
ec2:
instance_type: t2.micro
instance_ids: '{{ setup_instance.instance_ids }}'
state: absent
wait: yes
instance_tags:
Name: '{{ resource_prefix }}'
group_id: '{{ sg_id }}'
vpc_subnet_id: '{{ subnet_id }}'
<<: *aws_connection_info
ignore_errors: yes
when: setup_instance is defined
- include_tasks: tear_down.yml

@@ -0,0 +1,78 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
tasks:
- block:
# Create VPC, subnet, security group, and find image_id to create instance
- include_tasks: setup.yml
# Create new host, refresh inventory
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: '{{ aws_access_key }}'
aws_secret_key: '{{ aws_secret_key }}'
security_token: '{{ security_token }}'
region: '{{ aws_region }}'
no_log: yes
- name: create a new host
ec2:
image: '{{ image_id }}'
exact_count: 1
count_tag:
Name: '{{ resource_prefix }}'
instance_tags:
Name: '{{ resource_prefix }}'
tag1: value1
tag2: value2
instance_type: t2.micro
wait: yes
group_id: '{{ sg_id }}'
vpc_subnet_id: '{{ subnet_id }}'
<<: *aws_connection_info
register: setup_instance
- meta: refresh_inventory
- name: register the keyed sg group name
set_fact:
sg_group_name: "security_groups_{{ sg_id | replace('-', '_') }}"
- name: register one of the keyed tag groups name
set_fact:
tag_group_name: "tag_Name_{{ resource_prefix | replace('-', '_') }}"
- name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars
assert:
that:
# There are 9 groups: all, ungrouped, aws_ec2, the sg keyed group, 3 tag keyed groups (one per tag), the arch keyed group, and the constructed group
- "groups | length == 9"
- "groups[tag_group_name] | length == 1"
- "groups[sg_group_name] | length == 1"
- "groups.arch_x86_64 | length == 1"
- "groups.tag_with_name_key | length == 1"
- vars.hostvars[groups.aws_ec2.0]['test_compose_var_sum'] == 'value1value2'
always:
- name: remove setup ec2 instance
ec2:
instance_type: t2.micro
instance_ids: '{{ setup_instance.instance_ids }}'
state: absent
wait: yes
instance_tags:
Name: '{{ resource_prefix }}'
group_id: "{{ sg_id }}"
vpc_subnet_id: "{{ subnet_id }}"
<<: *aws_connection_info
ignore_errors: yes
when: setup_instance is defined
- include_tasks: tear_down.yml

@@ -0,0 +1,74 @@
- name: test updating inventory
block:
- name: assert group was populated with inventory but is empty
assert:
that:
- "'aws_ec2' in groups"
- "not groups.aws_ec2"
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
no_log: yes
- name: create a new host
ec2:
image: "{{ images[aws_region] }}"
exact_count: 1
count_tag:
Name: '{{ resource_prefix }}'
instance_tags:
Name: '{{ resource_prefix }}'
instance_type: t2.micro
wait: yes
group_id: '{{ setup_sg.group_id }}'
vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
<<: *aws_connection_info
register: setup_instance
- meta: refresh_inventory
- name: assert group was populated with inventory and is no longer empty
assert:
that:
- "'aws_ec2' in groups"
- "groups.aws_ec2 | length == 1"
- "groups.aws_ec2.0 == '{{ resource_prefix }}'"
- name: remove setup ec2 instance
ec2:
instance_type: t2.micro
instance_ids: '{{ setup_instance.instance_ids }}'
state: absent
wait: yes
instance_tags:
Name: '{{ resource_prefix }}'
group_id: '{{ setup_sg.group_id }}'
vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
<<: *aws_connection_info
- meta: refresh_inventory
- name: assert group was populated with inventory but is empty
assert:
that:
- "'aws_ec2' in groups"
- "not groups.aws_ec2"
always:
- name: remove setup ec2 instance
ec2:
instance_type: t2.micro
instance_ids: '{{ setup_instance.instance_ids }}'
state: absent
wait: yes
instance_tags:
Name: '{{ resource_prefix }}'
group_id: '{{ setup_sg.group_id }}'
vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
<<: *aws_connection_info
ignore_errors: yes

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
set -eux
# ensure test config is empty
ansible-playbook playbooks/empty_inventory_config.yml "$@"
export ANSIBLE_INVENTORY_ENABLED=aws_ec2
# test with default inventory file
ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
export ANSIBLE_INVENTORY=test.aws_ec2.yml
# test empty inventory config
ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
# generate inventory config and test using it
ansible-playbook playbooks/create_inventory_config.yml -e @../../integration_config.yml "$@"
ansible-playbook playbooks/test_populating_inventory.yml -e @../../integration_config.yml "$@"
# generate inventory config with caching and test using it
ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml' @../../integration_config.yml" "$@"
ansible-playbook playbooks/populate_cache.yml -e @../../integration_config.yml "$@"
ansible-playbook playbooks/test_inventory_cache.yml "$@"
# remove inventory cache
rm -r aws_ec2_cache_dir/
# generate inventory config with constructed features and test using it
ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.yml' @../../integration_config.yml" "$@"
ansible-playbook playbooks/test_populating_inventory_with_constructed.yml -e @../../integration_config.yml "$@"
# cleanup inventory config
ansible-playbook playbooks/empty_inventory_config.yml "$@"

@@ -0,0 +1,12 @@
plugin: aws_ec2
aws_access_key_id: '{{ aws_access_key }}'
aws_secret_access_key: '{{ aws_secret_key }}'
aws_security_token: '{{ security_token }}'
regions:
- '{{ aws_region }}'
filters:
tag:Name:
- '{{ resource_prefix }}'
hostnames:
- tag:Name
- dns-name

@@ -0,0 +1,12 @@
plugin: aws_ec2
cache: True
cache_plugin: jsonfile
cache_connection: aws_ec2_cache_dir
aws_access_key_id: '{{ aws_access_key }}'
aws_secret_access_key: '{{ aws_secret_key }}'
aws_security_token: '{{ security_token }}'
regions:
- '{{ aws_region }}'
filters:
tag:Name:
- '{{ resource_prefix }}'

@@ -0,0 +1,20 @@
plugin: aws_ec2
aws_access_key_id: '{{ aws_access_key }}'
aws_secret_access_key: '{{ aws_secret_key }}'
aws_security_token: '{{ security_token }}'
regions:
- '{{ aws_region }}'
filters:
tag:Name:
- '{{ resource_prefix }}'
keyed_groups:
- key: 'security_groups|json_query("[].group_id")'
prefix: 'security_groups'
- key: 'tags'
prefix: 'tag'
- prefix: 'arch'
key: "architecture"
compose:
test_compose_var_sum: tags.tag1 + tags.tag2
groups:
tag_with_name_key: "'Name' in (tags | list)"
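The `compose` and `groups` entries in this template are Jinja2 expressions evaluated against each host's variables, which is what the constructed integration test asserts (`test_compose_var_sum == 'value1value2'` and membership in `tag_with_name_key`). A sketch of that evaluation, assuming only the `jinja2` library and a hand-built hostvars dict:

```python
from jinja2 import Environment

# Hypothetical hostvars for an instance tagged as in the constructed test.
hostvars = {'tags': {'Name': 'ansible-test-host', 'tag1': 'value1', 'tag2': 'value2'}}
env = Environment()

# compose: test_compose_var_sum: tags.tag1 + tags.tag2
assert env.compile_expression('tags.tag1 + tags.tag2')(**hostvars) == 'value1value2'

# groups: tag_with_name_key: "'Name' in (tags | list)"
assert env.compile_expression("'Name' in (tags | list)")(**hostvars) is True
```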

@@ -28,7 +28,7 @@ import datetime
boto3 = pytest.importorskip('boto3')
botocore = pytest.importorskip('botocore')
from ansible.errors import AnsibleError
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.plugins.inventory.aws_ec2 import InventoryModule
from ansible.plugins.inventory.aws_ec2 import instance_data_filter_to_boto_attr
@@ -111,62 +111,101 @@ instances = {
}
def test_compile_values():
inv = InventoryModule()
@pytest.fixture(scope="module")
def inventory():
return InventoryModule()
def test_compile_values(inventory):
found_value = instances['Instances'][0]
chain_of_keys = instance_data_filter_to_boto_attr['instance.group-id']
for attr in chain_of_keys:
found_value = inv._compile_values(found_value, attr)
found_value = inventory._compile_values(found_value, attr)
assert found_value == "sg-12345678"
def test_get_boto_attr_chain():
inv = InventoryModule()
def test_get_boto_attr_chain(inventory):
instance = instances['Instances'][0]
assert inv._get_boto_attr_chain('network-interface.addresses.private-ip-address', instance) == "098.76.54.321"
assert inventory._get_boto_attr_chain('network-interface.addresses.private-ip-address', instance) == "098.76.54.321"
def test_boto3_conn():
inv = InventoryModule()
inv._options = {"boto_profile": "first_precedence",
"aws_access_key_id": "test_access_key",
"aws_secret_access_key": "test_secret_key",
"aws_security_token": "test_security_token"}
inv._set_credentials()
def test_boto3_conn(inventory):
inventory._options = {"boto_profile": "first_precedence",
"aws_access_key_id": "test_access_key",
"aws_secret_access_key": "test_secret_key",
"aws_security_token": "test_security_token"}
inventory._set_credentials()
with pytest.raises(AnsibleError) as error_message:
for connection, region in inv._boto3_conn(regions=['us-east-1']):
for connection, region in inventory._boto3_conn(regions=['us-east-1']):
assert error_message == "Insufficient credentials found."
def test_get_hostname_default():
inv = InventoryModule()
def test_get_hostname_default(inventory):
instance = instances['Instances'][0]
assert inv._get_hostname(instance, hostnames=None) == "ec2-12-345-67-890.compute-1.amazonaws.com"
assert inventory._get_hostname(instance, hostnames=None) == "ec2-12-345-67-890.compute-1.amazonaws.com"
def test_get_hostname():
def test_get_hostname(inventory):
hostnames = ['ip-address', 'dns-name']
inv = InventoryModule()
instance = instances['Instances'][0]
assert inv._get_hostname(instance, hostnames) == "12.345.67.890"
assert inventory._get_hostname(instance, hostnames) == "12.345.67.890"
def test_set_credentials(monkeypatch):
inv = InventoryModule()
inv._options = {'aws_access_key_id': 'test_access_key',
'aws_secret_access_key': 'test_secret_key',
'aws_security_token': 'test_security_token',
'boto_profile': 'test_profile'}
inv._set_credentials()
def test_set_credentials(inventory):
inventory._options = {'aws_access_key_id': 'test_access_key',
'aws_secret_access_key': 'test_secret_key',
'aws_security_token': 'test_security_token',
'boto_profile': 'test_profile'}
inventory._set_credentials()
assert inv.boto_profile == "test_profile"
assert inv.aws_access_key_id == "test_access_key"
assert inv.aws_secret_access_key == "test_secret_key"
assert inv.aws_security_token == "test_security_token"
assert inventory.boto_profile == "test_profile"
assert inventory.aws_access_key_id == "test_access_key"
assert inventory.aws_secret_access_key == "test_secret_key"
assert inventory.aws_security_token == "test_security_token"
def test_insufficient_credentials(monkeypatch):
inv = InventoryModule()
def test_insufficient_credentials(inventory):
inventory._options = {
'aws_access_key_id': None,
'aws_secret_access_key': None,
'aws_security_token': None,
'boto_profile': None
}
with pytest.raises(AnsibleError) as error_message:
inv._set_credentials()
inventory._set_credentials()
assert "Insufficient boto credentials found" in error_message
def test_validate_option(inventory):
assert ['us-east-1'] == inventory._validate_option('regions', list, 'us-east-1')
assert ['us-east-1'] == inventory._validate_option('regions', list, ['us-east-1'])
def test_illegal_option(inventory):
bad_filters = [{'tag:Environment': 'dev'}]
with pytest.raises(AnsibleParserError) as error_message:
inventory._validate_option('filters', dict, bad_filters)
assert "The option filters ([{'tag:Environment': 'dev'}]) must be a <class 'dict'>" == error_message
def test_empty_config_query_options(inventory):
regions, filters, hostnames, strict_permissions = inventory._get_query_options({})
assert regions == filters == hostnames == []
assert strict_permissions is True
def test_config_query_options(inventory):
regions, filters, hostnames, strict_permissions = inventory._get_query_options(
{'regions': ['us-east-1', 'us-east-2'],
'filters': {'tag:Environment': ['dev', 'prod']},
'hostnames': 'ip-address',
'strict_permissions': False}
)
assert regions == ['us-east-1', 'us-east-2']
assert filters == [{'Name': 'tag:Environment', 'Values': ['dev', 'prod']}]
assert hostnames == ['ip-address']
assert strict_permissions is False
def test_verify_file_bad_config(inventory):
assert inventory.verify_file('not_aws_config.yml') is False
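The query-options test above expects the config's `filters` mapping to be flattened into boto3's `Filters` list form. A hypothetical `to_boto3_filters` helper sketching that conversion (the real plugin uses a shared module_utils helper for this):

```python
# Sketch of the transformation the unit test asserts:
# {'tag:Environment': ['dev', 'prod']} -> [{'Name': 'tag:Environment', 'Values': ['dev', 'prod']}]
def to_boto3_filters(filters_option):
    boto3_filters = []
    for name, values in filters_option.items():
        if not isinstance(values, list):
            values = [values]
        boto3_filters.append({'Name': name, 'Values': values})
    return boto3_filters


assert to_boto3_filters({'tag:Environment': ['dev', 'prod']}) == [
    {'Name': 'tag:Environment', 'Values': ['dev', 'prod']}
]
```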
