Bulk autopep8 (modules)

As agreed in the 2017-12-07 Core meeting, bulk-fix PEP 8 issues

Generated using:
autopep8 1.3.3 (pycodestyle: 2.3.1)
autopep8 -r  --max-line-length 160 --in-place --ignore E305,E402,E722,E741 lib/ansible/modules

Manually fixed issues that autopep8 introduced
pull/33695/head
John Barker 7 years ago committed by John R Barker
parent d13d7e9404
commit c57a7f05e1

@ -303,7 +303,7 @@ def get_block_device_mapping(image):
"""
bdm_dict = dict()
bdm = getattr(image,'block_device_mapping')
bdm = getattr(image, 'block_device_mapping')
for device_name in bdm.keys():
bdm_dict[device_name] = {
'size': bdm[device_name].size,
@ -319,28 +319,28 @@ def get_block_device_mapping(image):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
owner = dict(required=False, default=None),
ami_id = dict(required=False),
ami_tags = dict(required=False, type='dict',
aliases = ['search_tags', 'image_tags']),
architecture = dict(required=False),
hypervisor = dict(required=False),
is_public = dict(required=False, type='bool'),
name = dict(required=False),
platform = dict(required=False),
product_code = dict(required=False),
sort = dict(required=False, default=None,
choices=['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location',
'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']),
sort_tag = dict(required=False),
sort_order = dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start = dict(required=False),
sort_end = dict(required=False),
state = dict(required=False, default='available'),
virtualization_type = dict(required=False),
no_result_action = dict(required=False, default='success',
choices = ['success', 'fail']),
owner=dict(required=False, default=None),
ami_id=dict(required=False),
ami_tags=dict(required=False, type='dict',
aliases=['search_tags', 'image_tags']),
architecture=dict(required=False),
hypervisor=dict(required=False),
is_public=dict(required=False, type='bool'),
name=dict(required=False),
platform=dict(required=False),
product_code=dict(required=False),
sort=dict(required=False, default=None,
choices=['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location',
'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']),
sort_tag=dict(required=False),
sort_order=dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start=dict(required=False),
sort_end=dict(required=False),
state=dict(required=False, default='available'),
virtualization_type=dict(required=False),
no_result_action=dict(required=False, default='success',
choices=['success', 'fail']),
)
)
@ -379,7 +379,7 @@ def main():
filter['image_id'] = ami_id
if ami_tags:
for tag in ami_tags:
filter['tag:'+tag] = ami_tags[tag]
filter['tag:' + tag] = ami_tags[tag]
if architecture:
filter['architecture'] = architecture
if hypervisor:
@ -435,9 +435,9 @@ def main():
if sort == 'tag':
if not sort_tag:
module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order == 'descending'))
elif sort:
results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
results.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
try:
if sort and sort_start and sort_end:

@ -136,9 +136,9 @@ def ubuntu(module):
reader = csv.reader(req, delimiter='\t')
try:
ami, aki, ari, tag, serial = lookup_ubuntu_ami(reader, release, stream,
store, arch, region, virt)
store, arch, region, virt)
module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag,
serial=serial)
serial=serial)
except KeyError:
module.fail_json(msg="No matching AMI found")
@ -163,7 +163,7 @@ def lookup_ubuntu_ami(table, release, stream, store, arch, region, virt):
actual_store, actual_arch, actual_region, ami, aki, ari,
actual_virt) = row
actual = (actual_release, actual_stream, actual_store, actual_arch,
actual_region, actual_virt)
actual_region, actual_virt)
if actual == expected:
# aki and ari are sometimes blank
if aki == '':
@ -185,14 +185,14 @@ def main():
distro=dict(required=True, choices=SUPPORTED_DISTROS),
release=dict(required=True),
stream=dict(required=False, default='server',
choices=['desktop', 'server']),
choices=['desktop', 'server']),
store=dict(required=False, default='ebs',
choices=['ebs', 'ebs-io1', 'ebs-ssd', 'instance-store']),
choices=['ebs', 'ebs-io1', 'ebs-ssd', 'instance-store']),
arch=dict(required=False, default='amd64',
choices=['i386', 'amd64']),
choices=['i386', 'amd64']),
region=dict(required=False, default='us-east-1', choices=AWS_REGIONS),
virt=dict(required=False, default='paravirtual',
choices=['paravirtual', 'hvm']),
choices=['paravirtual', 'hvm']),
)
module = AnsibleModule(argument_spec=arg_spec)
distro = module.params['distro']

@ -74,12 +74,12 @@ def get_instance_info(instance):
# Get groups
groups = []
for group in instance.groups:
groups.append({ 'id': group.id, 'name': group.name }.copy())
groups.append({'id': group.id, 'name': group.name}.copy())
# Get interfaces
interfaces = []
for interface in instance.interfaces:
interfaces.append({ 'id': interface.id, 'mac_address': interface.mac_address }.copy())
interfaces.append({'id': interface.id, 'mac_address': interface.mac_address}.copy())
# If an instance is terminated, sourceDestCheck is no longer returned
try:
@ -104,41 +104,41 @@ def get_instance_info(instance):
instance_profile = dict(instance.instance_profile) if instance.instance_profile is not None else None
instance_info = { 'id': instance.id,
'kernel': instance.kernel,
'instance_profile': instance_profile,
'root_device_type': instance.root_device_type,
'private_dns_name': instance.private_dns_name,
'public_dns_name': instance.public_dns_name,
'ebs_optimized': instance.ebs_optimized,
'client_token': instance.client_token,
'virtualization_type': instance.virtualization_type,
'architecture': instance.architecture,
'ramdisk': instance.ramdisk,
'tags': instance.tags,
'key_name': instance.key_name,
'source_destination_check': source_dest_check,
'image_id': instance.image_id,
'groups': groups,
'interfaces': interfaces,
'spot_instance_request_id': instance.spot_instance_request_id,
'requester_id': instance.requester_id,
'monitoring_state': instance.monitoring_state,
'placement': {
'tenancy': instance._placement.tenancy,
'zone': instance._placement.zone
},
'ami_launch_index': instance.ami_launch_index,
'launch_time': instance.launch_time,
'hypervisor': instance.hypervisor,
'region': instance.region.name,
'persistent': instance.persistent,
'private_ip_address': instance.private_ip_address,
'public_ip_address': instance.ip_address,
'state': instance._state.name,
'vpc_id': instance.vpc_id,
'block_device_mapping': bdm_dict,
}
instance_info = {'id': instance.id,
'kernel': instance.kernel,
'instance_profile': instance_profile,
'root_device_type': instance.root_device_type,
'private_dns_name': instance.private_dns_name,
'public_dns_name': instance.public_dns_name,
'ebs_optimized': instance.ebs_optimized,
'client_token': instance.client_token,
'virtualization_type': instance.virtualization_type,
'architecture': instance.architecture,
'ramdisk': instance.ramdisk,
'tags': instance.tags,
'key_name': instance.key_name,
'source_destination_check': source_dest_check,
'image_id': instance.image_id,
'groups': groups,
'interfaces': interfaces,
'spot_instance_request_id': instance.spot_instance_request_id,
'requester_id': instance.requester_id,
'monitoring_state': instance.monitoring_state,
'placement': {
'tenancy': instance._placement.tenancy,
'zone': instance._placement.zone
},
'ami_launch_index': instance.ami_launch_index,
'launch_time': instance.launch_time,
'hypervisor': instance.hypervisor,
'region': instance.region.name,
'persistent': instance.persistent,
'private_ip_address': instance.private_ip_address,
'public_ip_address': instance.ip_address,
'state': instance._state.name,
'vpc_id': instance.vpc_id,
'block_device_mapping': bdm_dict,
}
return instance_info
@ -163,7 +163,7 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters = dict(default=None, type='dict')
filters=dict(default=None, type='dict')
)
)

@ -189,6 +189,7 @@ def get_vpc_info(vpc):
'state': vpc.state,
})
def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Finds a VPC that matches a specific id or cidr + tags
@ -211,7 +212,7 @@ def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
# Check for existing VPC by cidr_block or id
if vpc_id is not None:
found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',})
found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available', })
else:
previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'})
@ -234,8 +235,8 @@ def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
return (found_vpc)
def routes_match(rt_list=None, rt=None, igw=None):
def routes_match(rt_list=None, rt=None, igw=None):
"""
Check if the route table has all routes as in given list
@ -284,6 +285,7 @@ def routes_match(rt_list=None, rt=None, igw=None):
else:
return True
def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
"""
Checks if the remote routes match the local routes.
@ -299,7 +301,7 @@ def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=Non
False when both routes and subnet associations matched.
"""
#We add a one for the main table
# We add a one for the main table
rtb_len = len(route_tables) + 1
remote_rtb_len = len(vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}))
if remote_rtb_len != rtb_len:
@ -307,13 +309,13 @@ def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=Non
for rt in route_tables:
rt_id = None
for sn in rt['subnets']:
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id})
if len(rsn) != 1:
module.fail_json(
msg='The subnet {0} to associate with route_table {1} ' \
msg='The subnet {0} to associate with route_table {1} '
'does not exist, aborting'.format(sn, rt)
)
nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
if not nrt:
return True
else:
@ -388,10 +390,10 @@ def create_vpc(module, vpc_conn):
time.sleep(5)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime())
module.fail_json(msg="wait for vpc availability timeout on %s" % time.asctime())
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# Done with base VPC, now change to attributes and features.
@ -408,7 +410,6 @@ def create_vpc(module, vpc_conn):
if new_tags:
vpc_conn.create_tags(vpc.id, new_tags)
# boto doesn't appear to have a way to determine the existing
# value of the dns attributes, so we just set them.
# It also must be done one at a time.
@ -420,7 +421,7 @@ def create_vpc(module, vpc_conn):
if not isinstance(subnets, list):
module.fail_json(msg='subnets needs to be a list of cidr blocks')
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
current_subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
# First add all new subnets
for subnet in subnets:
@ -468,7 +469,7 @@ def create_vpc(module, vpc_conn):
# to create tags results in exception.
# boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending'
# so i resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet
while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0:
while len(vpc_conn.get_all_subnets(filters={'subnet-id': new_subnet.id})) == 0:
time.sleep(0.1)
vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
@ -548,7 +549,7 @@ def create_vpc(module, vpc_conn):
if route['gw'] == 'igw':
if not internet_gateway:
module.fail_json(
msg='You asked for an Internet Gateway ' \
msg='You asked for an Internet Gateway '
'(igw) route, but you have no Internet Gateway'
)
route_kwargs['gateway_id'] = igw.id
@ -564,10 +565,10 @@ def create_vpc(module, vpc_conn):
# Associate with subnets
for sn in rt['subnets']:
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id})
if len(rsn) != 1:
module.fail_json(
msg='The subnet {0} to associate with route_table {1} ' \
msg='The subnet {0} to associate with route_table {1} '
'does not exist, aborting'.format(sn, rt)
)
rsn = rsn[0]
@ -576,7 +577,7 @@ def create_vpc(module, vpc_conn):
old_rt = vpc_conn.get_all_route_tables(
filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
)
old_rt = [ x for x in old_rt if x.id is not None ]
old_rt = [x for x in old_rt if x.id is not None]
if len(old_rt) == 1:
old_rt = old_rt[0]
association_id = None
@ -591,7 +592,7 @@ def create_vpc(module, vpc_conn):
changed = True
except EC2ResponseError as e:
module.fail_json(
msg='Unable to create and associate route table {0}, error: ' \
msg='Unable to create and associate route table {0}, error: '
'{1}'.format(rt, e)
)
@ -625,7 +626,7 @@ def create_vpc(module, vpc_conn):
created_vpc_id = vpc.id
returned_subnets = []
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
current_subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
for sn in current_subnets:
returned_subnets.append({
@ -647,6 +648,7 @@ def create_vpc(module, vpc_conn):
return (vpc_dict, created_vpc_id, returned_subnets, igw_id, changed)
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Terminates a VPC
@ -671,8 +673,8 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
if vpc is not None:
if vpc.state == 'available':
terminated_vpc_id=vpc.id
vpc_dict=get_vpc_info(vpc)
terminated_vpc_id = vpc.id
vpc_dict = get_vpc_info(vpc)
try:
subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
for sn in subnets:
@ -709,18 +711,18 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
cidr_block = dict(),
instance_tenancy = dict(choices=['default', 'dedicated'], default='default'),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
dns_support = dict(type='bool', default=True),
dns_hostnames = dict(type='bool', default=True),
subnets = dict(type='list'),
vpc_id = dict(),
internet_gateway = dict(type='bool', default=False),
resource_tags = dict(type='dict', required=True),
route_tables = dict(type='list'),
state = dict(choices=['present', 'absent'], default='present'),
cidr_block=dict(),
instance_tenancy=dict(choices=['default', 'dedicated'], default='default'),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=300),
dns_support=dict(type='bool', default=True),
dns_hostnames=dict(type='bool', default=True),
subnets=dict(type='list'),
vpc_id=dict(),
internet_gateway=dict(type='bool', default=False),
resource_tags=dict(type='dict', required=True),
route_tables=dict(type='list'),
state=dict(choices=['present', 'absent'], default='present'),
)
)
@ -740,7 +742,7 @@ def main():
try:
vpc_conn = connect_to_aws(boto.vpc, region, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")

@ -120,6 +120,7 @@ try:
except ImportError:
HAS_BOTO3 = False
def get_arn_from_kms_alias(kms, aliasname):
ret = kms.list_aliases()
key_id = None
@ -138,12 +139,14 @@ def get_arn_from_kms_alias(kms, aliasname):
return k['KeyArn']
raise Exception('could not find key from id: {}'.format(key_id))
def get_arn_from_role_name(iam, rolename):
ret = iam.get_role(RoleName=rolename)
if ret.get('Role') and ret['Role'].get('Arn'):
return ret['Role']['Arn']
raise Exception('could not find arn for name {}.'.format(rolename))
def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clean_invalid_entries=True):
ret = {}
keyret = kms.get_key_policy(KeyId=keyarn, PolicyName='default')
@ -179,10 +182,10 @@ def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clea
statement['Principal']['AWS'] = valid_entries
had_invalid_entries = True
if not role_arn in statement['Principal']['AWS']: # needs to be added.
if not role_arn in statement['Principal']['AWS']: # needs to be added.
changes_needed[granttype] = 'add'
statement['Principal']['AWS'].append(role_arn)
elif role_arn in statement['Principal']['AWS']: # not one the places the role should be
elif role_arn in statement['Principal']['AWS']: # not one the places the role should be
changes_needed[granttype] = 'remove'
statement['Principal']['AWS'].remove(role_arn)
@ -210,6 +213,7 @@ def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clea
return ret
def assert_policy_shape(policy):
'''Since the policy seems a little, uh, fragile, make sure we know approximately what we're looking at.'''
errors = []
@ -218,7 +222,7 @@ def assert_policy_shape(policy):
found_statement_type = {}
for statement in policy['Statement']:
for label,sidlabel in statement_label.items():
for label, sidlabel in statement_label.items():
if statement['Sid'] == sidlabel:
found_statement_type[label] = True
@ -230,16 +234,17 @@ def assert_policy_shape(policy):
raise Exception('Problems asserting policy shape. Cowardly refusing to modify it: {}'.format(' '.join(errors)))
return None
def main():
argument_spec = ansible.module_utils.ec2.ec2_argument_spec()
argument_spec.update(dict(
mode = dict(choices=['grant', 'deny'], default='grant'),
key_alias = dict(required=False, type='str'),
key_arn = dict(required=False, type='str'),
role_name = dict(required=False, type='str'),
role_arn = dict(required=False, type='str'),
grant_types = dict(required=False, type='list'),
clean_invalid_entries = dict(type='bool', default=True),
mode=dict(choices=['grant', 'deny'], default='grant'),
key_alias=dict(required=False, type='str'),
key_arn=dict(required=False, type='str'),
role_name=dict(required=False, type='str'),
role_arn=dict(required=False, type='str'),
grant_types=dict(required=False, type='list'),
clean_invalid_entries=dict(type='bool', default=True),
)
)
@ -255,7 +260,6 @@ def main():
result = {}
mode = module.params['mode']
try:
region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
kms = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='kms', region=region, endpoint=ec2_url, **aws_connect_kwargs)
@ -263,7 +267,6 @@ def main():
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg='cannot connect to AWS', exception=traceback.format_exc())
try:
if module.params['key_alias'] and not module.params['key_arn']:
module.params['key_arn'] = get_arn_from_kms_alias(kms, module.params['key_alias'])
@ -282,9 +285,9 @@ def main():
module.fail_json(msg='{} is an unknown grant type.'.format(g))
ret = do_grant(kms, module.params['key_arn'], module.params['role_arn'], module.params['grant_types'],
mode=mode,
dry_run=module.check_mode,
clean_invalid_entries=module.params['clean_invalid_entries'])
mode=mode,
dry_run=module.check_mode,
clean_invalid_entries=module.params['clean_invalid_entries'])
result.update(ret)
except Exception as err:

@ -263,7 +263,7 @@ from ansible.module_utils._text import to_bytes, to_native
def get_stack_events(cfn, stack_name, token_filter=None):
'''This event data was never correct, it worked as a side effect. So the v2.3 format is different.'''
ret = {'events':[], 'log':[]}
ret = {'events': [], 'log': []}
try:
pg = cfn.get_paginator(
@ -348,8 +348,8 @@ def create_changeset(module, stack_params, cfn):
cs = cfn.create_change_set(**stack_params)
result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET')
result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
'NOTE that dependencies on this stack might fail due to pending changes!']
'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
'NOTE that dependencies on this stack might fail due to pending changes!']
except Exception as err:
error_msg = boto_exception(err)
if 'No updates are to be performed.' in error_msg:
@ -413,7 +413,7 @@ def stack_operation(cfn, stack_name, operation, op_token=None):
except:
# If the stack previously existed, and now can't be found then it's
# been deleted successfully.
if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
ret = get_stack_events(cfn, stack_name, op_token)
ret.update({'changed': True, 'output': 'Stack Deleted'})
return ret
@ -421,12 +421,12 @@ def stack_operation(cfn, stack_name, operation, op_token=None):
return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
ret = get_stack_events(cfn, stack_name, op_token)
if not stack:
if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
ret = get_stack_events(cfn, stack_name, op_token)
ret.update({'changed': True, 'output': 'Stack Deleted'})
return ret
else:
ret.update({'changed': False, 'failed': True, 'output' : 'Stack not found.'})
ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
return ret
# it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
# Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
@ -435,7 +435,7 @@ def stack_operation(cfn, stack_name, operation, op_token=None):
return ret
# note the ordering of ROLLBACK_COMPLETE and COMPLETE, because otherwise COMPLETE will match both cases.
elif stack['StackStatus'].endswith('_COMPLETE'):
ret.update({'changed': True, 'output' : 'Stack %s complete' % operation })
ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
return ret
elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
@ -447,7 +447,7 @@ def stack_operation(cfn, stack_name, operation, op_token=None):
else:
# this can loop forever :/
time.sleep(5)
return {'failed': True, 'output':'Failed for unknown reasons.'}
return {'failed': True, 'output': 'Failed for unknown reasons.'}
def build_changeset_name(stack_params):
@ -470,7 +470,7 @@ def check_mode_changeset(module, stack_params, cfn):
try:
change_set = cfn.create_change_set(**stack_params)
for i in range(60): # total time 5 min
for i in range(60): # total time 5 min
description = cfn.describe_change_set(ChangeSetName=change_set['Id'])
if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
break
@ -496,7 +496,7 @@ def get_stack_facts(cfn, stack_name):
try:
stack_response = cfn.describe_stacks(StackName=stack_name)
stack_info = stack_response['Stacks'][0]
except (botocore.exceptions.ValidationError,botocore.exceptions.ClientError) as err:
except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
error_msg = boto_exception(err)
if 'does not exist' in error_msg:
# missing stack, don't bail.
@ -567,7 +567,7 @@ def main():
stack_params['StackPolicyBody'] = open(module.params['stack_policy'], 'r').read()
template_parameters = module.params['template_parameters']
stack_params['Parameters'] = [{'ParameterKey':k, 'ParameterValue':str(v)} for k, v in template_parameters.items()]
stack_params['Parameters'] = [{'ParameterKey': k, 'ParameterValue': str(v)} for k, v in template_parameters.items()]
if isinstance(module.params.get('tags'), dict):
stack_params['Tags'] = ansible.module_utils.ec2.ansible_dict_to_boto3_tag_list(module.params['tags'])
@ -637,7 +637,7 @@ def main():
"resource_type": res['ResourceType'],
"last_updated_time": res['LastUpdatedTimestamp'],
"status": res['ResourceStatus'],
"status_reason": res.get('ResourceStatusReason') # can be blank, apparently
"status_reason": res.get('ResourceStatusReason') # can be blank, apparently
})
result['stack_resources'] = stack_resources
@ -658,8 +658,8 @@ def main():
if module.params['template_format'] is not None:
result['warnings'] = [('Argument `template_format` is deprecated '
'since Ansible 2.3, JSON and YAML templates are now passed '
'directly to the CloudFormation API.')]
'since Ansible 2.3, JSON and YAML templates are now passed '
'directly to the CloudFormation API.')]
module.exit_json(**result)

@ -259,7 +259,7 @@ class CloudFrontServiceManager:
def get_distribution(self, distribution_id):
try:
func = partial(self.client.get_distribution,Id=distribution_id)
func = partial(self.client.get_distribution, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing distribution - " + str(e),
@ -268,7 +268,7 @@ class CloudFrontServiceManager:
def get_distribution_config(self, distribution_id):
try:
func = partial(self.client.get_distribution_config,Id=distribution_id)
func = partial(self.client.get_distribution_config, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing distribution configuration - " + str(e),
@ -277,7 +277,7 @@ class CloudFrontServiceManager:
def get_origin_access_identity(self, origin_access_identity_id):
try:
func = partial(self.client.get_cloud_front_origin_access_identity,Id=origin_access_identity_id)
func = partial(self.client.get_cloud_front_origin_access_identity, Id=origin_access_identity_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing origin access identity - " + str(e),
@ -286,7 +286,7 @@ class CloudFrontServiceManager:
def get_origin_access_identity_config(self, origin_access_identity_id):
try:
func = partial(self.client.get_cloud_front_origin_access_identity_config,Id=origin_access_identity_id)
func = partial(self.client.get_cloud_front_origin_access_identity_config, Id=origin_access_identity_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing origin access identity configuration - " + str(e),
@ -295,7 +295,7 @@ class CloudFrontServiceManager:
def get_invalidation(self, distribution_id, invalidation_id):
try:
func = partial(self.client.get_invalidation,DistributionId=distribution_id,Id=invalidation_id)
func = partial(self.client.get_invalidation, DistributionId=distribution_id, Id=invalidation_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing invalidation - " + str(e),
@ -304,7 +304,7 @@ class CloudFrontServiceManager:
def get_streaming_distribution(self, distribution_id):
try:
func = partial(self.client.get_streaming_distribution,Id=distribution_id)
func = partial(self.client.get_streaming_distribution, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
@ -313,7 +313,7 @@ class CloudFrontServiceManager:
def get_streaming_distribution_config(self, distribution_id):
try:
func = partial(self.client.get_streaming_distribution_config,Id=distribution_id)
func = partial(self.client.get_streaming_distribution_config, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
@ -399,13 +399,13 @@ class CloudFrontServiceManager:
def summary_get_origin_access_identity_list(self):
try:
origin_access_identity_list = { 'origin_access_identities': [] }
origin_access_identity_list = {'origin_access_identities': []}
origin_access_identities = self.list_origin_access_identities()
for origin_access_identity in origin_access_identities:
oai_id = origin_access_identity['Id']
oai_full_response = self.get_origin_access_identity(oai_id)
oai_summary = { 'Id': oai_id, 'ETag': oai_full_response['ETag'] }
origin_access_identity_list['origin_access_identities'].append( oai_summary )
oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
origin_access_identity_list['origin_access_identities'].append(oai_summary)
return origin_access_identity_list
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error generating summary of origin access identities - " + str(e),
@ -415,8 +415,8 @@ class CloudFrontServiceManager:
def summary_get_distribution_list(self, streaming=False):
try:
list_name = 'streaming_distributions' if streaming else 'distributions'
key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled' ]
distribution_list = { list_name: [] }
key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
distribution_list = {list_name: []}
distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
for dist in distributions:
temp_distribution = {}
@ -520,16 +520,18 @@ class CloudFrontServiceManager:
if 'Items' in item['Aliases']:
aliases = item['Aliases']['Items']
for alias in aliases:
keyed_list.update( { alias: item } )
keyed_list.update( { distribution_id: item } )
keyed_list.update({alias: item})
keyed_list.update({distribution_id: item})
return keyed_list
def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
facts[distribution_id].update(details)
for alias in aliases:
facts[alias].update(details)
return facts
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
@ -581,17 +583,17 @@ def main():
summary = module.params.get('summary')
aliases = []
result = { 'cloudfront': {} }
result = {'cloudfront': {}}
facts = {}
require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
streaming_distribution_config or list_invalidations)
streaming_distribution_config or list_invalidations)
# set default to summary if no option specified
summary = summary or not (distribution or distribution_config or origin_access_identity or
origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
list_streaming_distributions or list_distributions)
origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
list_streaming_distributions or list_distributions)
# validations
if require_distribution_id and distribution_id is None and domain_name_alias is None:
@ -611,21 +613,21 @@ def main():
# set appropriate cloudfront id
if distribution_id and not list_invalidations:
facts = { distribution_id: {} }
facts = {distribution_id: {}}
aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
for alias in aliases:
facts.update( { alias: {} } )
facts.update({alias: {}})
if invalidation_id:
facts.update( { invalidation_id: {} } )
facts.update({invalidation_id: {}})
elif distribution_id and list_invalidations:
facts = { distribution_id: {} }
facts = {distribution_id: {}}
aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
for alias in aliases:
facts.update( { alias: {} } )
facts.update({alias: {}})
elif origin_access_identity_id:
facts = { origin_access_identity_id: {} }
facts = {origin_access_identity_id: {}}
elif web_acl_id:
facts = { web_acl_id: {} }
facts = {web_acl_id: {}}
# get details based on options
if distribution:
@ -644,7 +646,7 @@ def main():
if streaming_distribution_config:
facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id)
if list_invalidations:
facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id) }
facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)}
if 'facts_to_set' in vars():
facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases)

@ -332,7 +332,7 @@ def activate_pipeline(client, module):
pass
else:
module.fail_json(msg=('Data Pipeline {0} failed to activate '
'within timeout {1} seconds').format(dp_name, timeout))
'within timeout {1} seconds').format(dp_name, timeout))
changed = True
data_pipeline = get_result(client, dp_id)
@ -477,7 +477,7 @@ def diff_pipeline(client, module, objects, unique_id, dp_name):
result = {'data_pipeline': data_pipeline,
'msg': msg}
except DataPipelineNotFound:
create_dp = True
create_dp = True
return create_dp, changed, result

@ -223,7 +223,6 @@ def create_or_update_dynamo_table(connection, module, boto3_dynamodb=None, boto3
try:
table = Table(table_name, connection=connection)
if dynamo_table_exists(table):
result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
else:
@ -349,7 +348,7 @@ def has_throughput_changed(table, new_throughput):
return False
return new_throughput['read'] != table.throughput['read'] or \
new_throughput['write'] != table.throughput['write']
new_throughput['write'] != table.throughput['write']
def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type):
@ -397,6 +396,7 @@ def validate_index(index, module):
if index['type'] not in INDEX_TYPE_OPTIONS:
module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))
def get_indexes(all_indexes):
indexes = []
global_indexes = []
@ -429,7 +429,6 @@ def get_indexes(all_indexes):
return indexes, global_indexes
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
@ -442,8 +441,8 @@ def main():
read_capacity=dict(default=1, type='int'),
write_capacity=dict(default=1, type='int'),
indexes=dict(default=[], type='list'),
tags = dict(type='dict'),
wait_for_active_timeout = dict(default=60, type='int'),
tags=dict(type='dict'),
wait_for_active_timeout=dict(default=60, type='int'),
))
module = AnsibleModule(

@ -158,7 +158,6 @@ except ImportError:
HAS_BOTO3 = False
def copy_image(module, ec2):
"""
Copies an AMI
@ -185,8 +184,8 @@ def copy_image(module, ec2):
if module.params.get('tags'):
ec2.create_tags(
Resources=[image_id],
Tags=[{'Key' : k, 'Value': v} for k,v in module.params.get('tags').items()]
)
Tags=[{'Key': k, 'Value': v} for k, v in module.params.get('tags').items()]
)
module.exit_json(changed=True, image_id=image_id)
except WaiterError as we:

@ -171,7 +171,7 @@ class ElbManager:
found = False
for lb in self.lbs:
if lb.name == lbtest:
found=True
found = True
break
return found
@ -330,7 +330,7 @@ def main():
argument_spec.update(dict(
state={'required': True},
instance_id={'required': True},
ec2_elbs={'default': None, 'required': False, 'type':'list'},
ec2_elbs={'default': None, 'required': False, 'type': 'list'},
enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
wait={'required': False, 'default': True, 'type': 'bool'},
wait_timeout={'required': False, 'default': 0, 'type': 'int'}
@ -363,7 +363,7 @@ def main():
if ec2_elbs is not None:
for elb in ec2_elbs:
if not elb_man.exists(elb):
msg="ELB %s does not exist" % elb
msg = "ELB %s does not exist" % elb
module.fail_json(msg=msg)
if module.params['state'] == 'present':

@ -425,6 +425,7 @@ def _throttleable_operation(max_retries):
return _do_op
return _operation_wrapper
def _get_vpc_connection(module, region, aws_connect_params):
try:
return connect_to_aws(boto.vpc, region, **aws_connect_params)
@ -434,6 +435,7 @@ def _get_vpc_connection(module, region, aws_connect_params):
_THROTTLING_RETRIES = 5
class ElbManager(object):
"""Handles ELB creation and destruction"""
@ -579,10 +581,10 @@ class ElbManager(object):
# status of instances behind the ELB
if info['instances']:
info['instance_health'] = [ dict(
instance_id = instance_state.instance_id,
reason_code = instance_state.reason_code,
state = instance_state.state
info['instance_health'] = [dict(
instance_id=instance_state.instance_id,
reason_code=instance_state.reason_code,
state=instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
@ -663,7 +665,7 @@ class ElbManager(object):
elb_interfaces = self.ec2_conn.get_all_network_interfaces(
filters={'attachment.instance-owner-id': 'amazon-elb',
'description': 'ELB {0}'.format(self.name) })
'description': 'ELB {0}'.format(self.name)})
for x in range(0, max_retries):
for interface in elb_interfaces:
@ -888,13 +890,13 @@ class ElbManager(object):
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
@ -962,7 +964,7 @@ class ElbManager(object):
"enabled": True,
"s3_bucket_name": self.access_logs['s3_location'],
"s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
"emit_interval": self.access_logs.get('interval', 60),
"emit_interval": self.access_logs.get('interval', 60),
}
update_access_logs_config = False
@ -1002,10 +1004,10 @@ class ElbManager(object):
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
def _create_policy(self, policy_param, policy_meth, policy):
getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy)
getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
@ -1223,7 +1225,7 @@ class ElbManager(object):
params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
self.elb_conn.make_request('AddTags', params)
self.changed=True
self.changed = True
# Remove extra tags
dictact = dict(set(tagdict.items()) - set(self.tags.items()))
@ -1232,7 +1234,7 @@ class ElbManager(object):
params['Tags.member.%d.Key' % (i + 1)] = key
self.elb_conn.make_request('RemoveTags', params)
self.changed=True
self.changed = True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
@ -1275,7 +1277,7 @@ def main():
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['security_group_ids', 'security_group_names']]
mutually_exclusive=[['security_group_ids', 'security_group_names']]
)
if not HAS_BOTO:
@ -1321,7 +1323,7 @@ def main():
security_group_ids = []
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
if subnets: # We have at least one subnet, ergo this is a VPC
if subnets: # We have at least one subnet, ergo this is a VPC
vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
filters = {'vpc_id': vpc_id}
@ -1333,10 +1335,10 @@ def main():
if isinstance(group_name, string_types):
group_name = [group_name]
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
security_group_ids.extend(group_id)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
module.fail_json(msg=str(e))
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,

@ -68,9 +68,9 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError,
ansible_dict_to_boto3_filter_list, boto3_conn,
boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
connect_to_aws, ec2_argument_spec, get_aws_connection_info)
ansible_dict_to_boto3_filter_list, boto3_conn,
boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
connect_to_aws, ec2_argument_spec, get_aws_connection_info)
def list_ec2_eni_boto3(connection, module):
@ -99,7 +99,7 @@ def get_eni_info(interface):
# Private addresses
private_addresses = []
for ip in interface.private_ip_addresses:
private_addresses.append({ 'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary })
private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
interface_info = {'id': interface.id,
'subnet_id': interface.subnet_id,
@ -152,7 +152,7 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters = dict(default=None, type='dict')
filters=dict(default=None, type='dict')
)
)

@ -339,7 +339,7 @@ def create_launch_config(connection, module):
module.fail_json(msg="Failed to create launch configuration", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
result = (dict((k, v) for k, v in launch_config.items()
if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings']))
if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings']))
result['CreatedTime'] = to_text(launch_config.get('CreatedTime'))

@ -191,7 +191,7 @@ def list_launch_configs(connection, module):
launch_config['CreatedTime'] = str(launch_config['CreatedTime'])
if sort:
snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
try:
if sort and sort_start and sort_end:
@ -210,13 +210,13 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name = dict(required=False, default=[], type='list'),
sort = dict(required=False, default=None,
choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
sort_order = dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start = dict(required=False),
sort_end = dict(required=False),
name=dict(required=False, default=[], type='list'),
sort=dict(required=False, default=None,
choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
sort_order=dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start=dict(required=False),
sort_end=dict(required=False),
)
)

@ -199,13 +199,13 @@ def create_metric_alarm(connection, module):
alarm = alarms[0]
changed = False
for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
for attr in ('comparison', 'metric', 'namespace', 'statistic', 'threshold', 'period', 'evaluation_periods', 'unit', 'description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
#this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
# this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
comparison = alarm.comparison
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
comparisons = {'<=': 'LessThanOrEqualToThreshold', '<': 'LessThanThreshold', '>=': 'GreaterThanOrEqualToThreshold', '>': 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions')
@ -215,10 +215,10 @@ def create_metric_alarm(connection, module):
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
changed=True
changed = True
setattr(alarm, 'dimensions', dim1)
for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
for attr in ('alarm_actions', 'insufficient_data_actions', 'ok_actions'):
action = module.params.get(attr) or []
# Boto and/or ansible may provide same elements in lists but in different order.
# Compare on sets since they do not need any order.
@ -233,24 +233,25 @@ def create_metric_alarm(connection, module):
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
name = module.params.get('name')
@ -289,7 +290,7 @@ def main():
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
)
)
)
module = AnsibleModule(argument_spec=argument_spec)

@ -88,7 +88,7 @@ def create_scaling_policy(connection, module):
min_adjustment_step = module.params.get('min_adjustment_step')
cooldown = module.params.get('cooldown')
scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])
if not scalingPolicies:
sp = ScalingPolicy(
@ -101,7 +101,7 @@ def create_scaling_policy(connection, module):
try:
connection.create_scaling_policy(sp)
policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e:
@ -121,7 +121,7 @@ def create_scaling_policy(connection, module):
setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
# check the remaining attributes
for attr in ('adjustment_type','scaling_adjustment','cooldown'):
for attr in ('adjustment_type', 'scaling_adjustment', 'cooldown'):
if getattr(policy, attr) != module.params.get(attr):
changed = True
setattr(policy, attr, module.params.get(attr))
@ -129,7 +129,7 @@ def create_scaling_policy(connection, module):
try:
if changed:
connection.create_scaling_policy(policy)
policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e:
@ -140,7 +140,7 @@ def delete_scaling_policy(connection, module):
sp_name = module.params.get('name')
asg_name = module.params.get('asg_name')
scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])
if scalingPolicies:
try:
@ -156,12 +156,12 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name = dict(required=True, type='str'),
adjustment_type = dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']),
asg_name = dict(required=True, type='str'),
scaling_adjustment = dict(type='int'),
min_adjustment_step = dict(type='int'),
cooldown = dict(type='int'),
name=dict(required=True, type='str'),
adjustment_type=dict(type='str', choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']),
asg_name=dict(required=True, type='str'),
scaling_adjustment=dict(type='int'),
min_adjustment_step=dict(type='int'),
cooldown=dict(type='int'),
state=dict(default='present', choices=['present', 'absent']),
)
)
@ -178,7 +178,7 @@ def main():
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg = str(e))
module.fail_json(msg=str(e))
if state == 'present':
create_scaling_policy(connection, module)

@ -184,7 +184,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,
changed = False
required = [volume_id, snapshot_id, instance_id]
if required.count(None) != len(required) - 1: # only 1 must be set
if required.count(None) != len(required) - 1: # only 1 must be set
module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
if instance_id and not device_name or device_name and not instance_id:
module.fail_json(msg='Instance ID and device name must both be specified')
@ -193,7 +193,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,
try:
volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if not volumes:
module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
@ -202,7 +202,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,
if state == 'absent':
if not snapshot_id:
module.fail_json(msg = 'snapshot_id must be set when state is absent')
module.fail_json(msg='snapshot_id must be set when state is absent')
try:
ec2.delete_snapshot(snapshot_id)
except boto.exception.BotoServerError as e:
@ -210,7 +210,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,
if e.error_code == 'InvalidSnapshot.NotFound':
module.exit_json(changed=False)
else:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# successful delete
module.exit_json(changed=True)
@ -221,7 +221,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
snapshot = _get_most_recent_snapshot(current_snapshots,
max_snapshot_age_secs=last_snapshot_min_age)
try:
@ -249,16 +249,16 @@ def create_snapshot_ansible_module():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
volume_id = dict(),
description = dict(),
instance_id = dict(),
snapshot_id = dict(),
device_name = dict(),
wait = dict(type='bool', default=True),
wait_timeout = dict(type='int', default=0),
last_snapshot_min_age = dict(type='int', default=0),
snapshot_tags = dict(type='dict', default=dict()),
state = dict(choices=['absent', 'present'], default='present'),
volume_id=dict(),
description=dict(),
instance_id=dict(),
snapshot_id=dict(),
device_name=dict(),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=0),
last_snapshot_min_age=dict(type='int', default=0),
snapshot_tags=dict(type='dict', default=dict()),
state=dict(choices=['absent', 'present'], default='present'),
)
)
module = AnsibleModule(argument_spec=argument_spec)

@ -179,8 +179,8 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def list_ec2_snapshots(connection, module):

@ -126,9 +126,9 @@ from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
resource = dict(required=True),
tags = dict(type='dict'),
state = dict(default='present', choices=['present', 'absent', 'list']),
resource=dict(required=True),
tags=dict(type='dict'),
state=dict(default='present', choices=['present', 'absent', 'list']),
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
@ -144,7 +144,7 @@ def main():
# We need a comparison here so that we can accurately report back changed status.
# Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate.
filters = {'resource-id' : resource}
filters = {'resource-id': resource}
gettags = ec2.get_all_tags(filters=filters)
dictadd = {}
@ -158,14 +158,14 @@ def main():
if not tags:
module.fail_json(msg="tags argument is required when state is present")
if set(tags.items()).issubset(set(tagdict.items())):
module.exit_json(msg="Tags already exists in %s." %resource, changed=False)
module.exit_json(msg="Tags already exists in %s." % resource, changed=False)
else:
for (key, value) in set(tags.items()):
if (key, value) not in set(tagdict.items()):
dictadd[key] = value
if not module.check_mode:
ec2.create_tags(resource, dictadd)
module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True)
module.exit_json(msg="Tags %s created for resource %s." % (dictadd, resource), changed=True)
if state == 'absent':
if not tags:
@ -180,7 +180,7 @@ def main():
dictremove[key] = value
if not module.check_mode:
ec2.delete_tags(resource, dictremove)
module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True)
module.exit_json(msg="Tags %s removed for resource %s." % (dictremove, resource), changed=True)
if state == 'list':
module.exit_json(changed=False, tags=tagdict)

@ -280,7 +280,7 @@ def get_volume(module, ec2):
try:
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if not vols:
if id:
@ -306,7 +306,7 @@ def get_volumes(module, ec2):
else:
vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return vols
@ -330,6 +330,7 @@ def boto_supports_volume_encryption():
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def boto_supports_kms_key_id():
"""
Check if Boto library supports kms_key_ids (added in 2.39.0)
@ -339,6 +340,7 @@ def boto_supports_kms_key_id():
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.39.0')
def create_volume(module, ec2, zone):
changed = False
name = module.params.get('name')
@ -375,7 +377,7 @@ def create_volume(module, ec2, zone):
if tags:
ec2.create_tags([volume.id], tags)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return volume, changed
@ -400,12 +402,12 @@ def attach_volume(module, ec2, volume, instance):
else:
device_name = '/dev/xvdf'
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if volume.attachment_state() is not None:
adata = volume.attach_data
if adata.instance_id != instance.id:
module.fail_json(msg = "Volume %s is already attached to another instance: %s"
module.fail_json(msg="Volume %s is already attached to another instance: %s"
% (volume.id, adata.instance_id))
else:
# Volume is already attached to right instance
@ -418,7 +420,7 @@ def attach_volume(module, ec2, volume, instance):
volume.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
modify_dot_attribute(module, ec2, instance, device_name)
@ -435,7 +437,7 @@ def modify_dot_attribute(module, ec2, instance, device_name):
instance.update()
dot = instance.block_device_mapping[device_name].delete_on_termination
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if delete_on_termination != dot:
try:
@ -450,7 +452,7 @@ def modify_dot_attribute(module, ec2, instance, device_name):
instance.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return changed
@ -506,20 +508,20 @@ def get_volume_info(volume, state):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance = dict(),
id = dict(),
name = dict(),
volume_size = dict(),
volume_type = dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
iops = dict(),
encrypted = dict(type='bool', default=False),
kms_key_id = dict(),
device_name = dict(),
delete_on_termination = dict(type='bool', default=False),
zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot = dict(),
state = dict(choices=['absent', 'present', 'list'], default='present'),
tags = dict(type='dict', default={})
instance=dict(),
id=dict(),
name=dict(),
volume_size=dict(),
volume_type=dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
iops=dict(),
encrypted=dict(type='bool', default=False),
kms_key_id=dict(),
device_name=dict(),
delete_on_termination=dict(type='bool', default=False),
zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot=dict(),
state=dict(choices=['absent', 'present', 'list'], default='present'),
tags=dict(type='dict', default={})
)
)
module = AnsibleModule(argument_spec=argument_spec)

@ -92,12 +92,13 @@ def get_volume_info(volume):
'device': attachment.device,
'instance_id': attachment.instance_id,
'status': attachment.status
},
},
'tags': volume.tags
}
}
return volume_info
def list_ec2_volumes(connection, module):
filters = module.params.get("filters")
@ -118,7 +119,7 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters = dict(default=None, type='dict')
filters=dict(default=None, type='dict')
)
)

@ -235,6 +235,7 @@ def ensure_tags(module, vpc_conn, resource_id, tags, add_only, check_mode):
except EC2ResponseError as e:
module.fail_json(msg="Failed to modify tags: %s" % e.message, exception=traceback.format_exc())
def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
"""
Returns the DHCP options object currently associated with the requested VPC ID using the VPC
@ -284,7 +285,7 @@ def main():
inherit_existing=dict(type='bool', default=False),
tags=dict(type='dict', default=None, aliases=['resource_tags']),
state=dict(type='str', default='present', choices=['present', 'absent'])
)
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
@ -312,17 +313,17 @@ def main():
new_options['ntp-servers'] = params['ntp_servers']
if params['domain_name'] is not None:
# needs to be a list for comparison with boto objects later
new_options['domain-name'] = [ params['domain_name'] ]
new_options['domain-name'] = [params['domain_name']]
if params['netbios_node_type'] is not None:
# needs to be a list for comparison with boto objects later
new_options['netbios-node-type'] = [ str(params['netbios_node_type']) ]
new_options['netbios-node-type'] = [str(params['netbios_node_type'])]
# If we were given a vpc_id then we need to look at the options on that
if params['vpc_id']:
existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id'])
# if we've been asked to inherit existing options, do that now
if params['inherit_existing']:
if existing_options:
for option in [ 'domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
for option in ['domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]):
new_options[option] = existing_options.options.get(option)
@ -336,7 +337,7 @@ def main():
# Now let's cover the case where there are existing options that we were told about by id
# If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
else:
supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id':params['dhcp_options_id']})
supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id': params['dhcp_options_id']})
if len(supplied_options) != 1:
if params['state'] != 'absent':
module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist")

@ -147,7 +147,7 @@ from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_conn
DEFAULT_RULE_FIELDS = {
'RuleNumber': 32767,
'RuleAction': 'deny',
'CidrBlock': '0.0.0.0/0',
'CidrBlock': '0.0.0.0/0',
'Protocol': '-1'
}
@ -159,7 +159,7 @@ DEFAULT_EGRESS = dict(list(DEFAULT_RULE_FIELDS.items()) + [('Egress', True)])
PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, }
#Utility methods
# Utility methods
def icmp_present(entry):
if len(entry) == 6 and entry[1] == 'icmp' or entry[1] == 1:
return True
@ -225,7 +225,7 @@ def nacls_changed(nacl, client, module):
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
nacl = describe_network_acl(client, module)
entries = nacl['NetworkAcls'][0]['Entries']
tmp_egress = [entry for entry in entries if entry['Egress'] is True and DEFAULT_EGRESS !=entry]
tmp_egress = [entry for entry in entries if entry['Egress'] is True and DEFAULT_EGRESS != entry]
tmp_ingress = [entry for entry in entries if entry['Egress'] is False]
egress = [rule for rule in tmp_egress if DEFAULT_EGRESS != rule]
ingress = [rule for rule in tmp_ingress if DEFAULT_INGRESS != rule]
@ -321,7 +321,7 @@ def construct_acl_entries(nacl, client, module):
create_network_acl_entry(params, client, module)
## Module invocations
# Module invocations
def setup_network_acl(client, module):
changed = False
nacl = describe_network_acl(client, module)
@ -372,7 +372,7 @@ def remove_network_acl(client, module):
return changed, result
#Boto3 client methods
# Boto3 client methods
def create_network_acl(vpc_id, client, module):
try:
if module.check_mode:
@ -546,7 +546,7 @@ def main():
ingress=dict(required=False, type='list', default=list()),
egress=dict(required=False, type='list', default=list(),),
state=dict(default='present', choices=['present', 'absent']),
),
),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,

@ -154,9 +154,9 @@ def vpc_exists(module, vpc, name, cidr_block, multi):
matched_vpc = None
try:
matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block})
matching_vpcs = vpc.get_all_vpcs(filters={'tag:Name': name, 'cidr-block': cidr_block})
except Exception as e:
e_msg=boto_exception(e)
e_msg = boto_exception(e)
module.fail_json(msg=e_msg)
if multi:
@ -186,7 +186,7 @@ def update_vpc_tags(vpc, module, vpc_obj, tags, name):
else:
return False
except Exception as e:
e_msg=boto_exception(e)
e_msg = boto_exception(e)
module.fail_json(msg=e_msg)
@ -199,6 +199,7 @@ def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
else:
return False
def get_vpc_values(vpc_obj):
if vpc_obj is not None:
@ -213,18 +214,19 @@ def get_vpc_values(vpc_obj):
else:
return None
def main():
argument_spec=ec2_argument_spec()
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name = dict(type='str', default=None, required=True),
cidr_block = dict(type='str', default=None, required=True),
tenancy = dict(choices=['default', 'dedicated'], default='default'),
dns_support = dict(type='bool', default=True),
dns_hostnames = dict(type='bool', default=True),
dhcp_opts_id = dict(type='str', default=None, required=False),
tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
state = dict(choices=['present', 'absent'], default='present'),
multi_ok = dict(type='bool', default=False)
name=dict(type='str', default=None, required=True),
cidr_block=dict(type='str', default=None, required=True),
tenancy=dict(choices=['default', 'dedicated'], default='default'),
dns_support=dict(type='bool', default=True),
dns_hostnames=dict(type='bool', default=True),
dhcp_opts_id=dict(type='str', default=None, required=False),
tags=dict(type='dict', required=False, default=None, aliases=['resource_tags']),
state=dict(choices=['present', 'absent'], default='present'),
multi_ok=dict(type='bool', default=False)
)
)
@ -236,17 +238,17 @@ def main():
if not HAS_BOTO:
module.fail_json(msg='boto is required for this module')
name=module.params.get('name')
cidr_block=module.params.get('cidr_block')
tenancy=module.params.get('tenancy')
dns_support=module.params.get('dns_support')
dns_hostnames=module.params.get('dns_hostnames')
dhcp_id=module.params.get('dhcp_opts_id')
tags=module.params.get('tags')
state=module.params.get('state')
multi=module.params.get('multi_ok')
name = module.params.get('name')
cidr_block = module.params.get('cidr_block')
tenancy = module.params.get('tenancy')
dns_support = module.params.get('dns_support')
dns_hostnames = module.params.get('dns_hostnames')
dhcp_id = module.params.get('dhcp_opts_id')
tags = module.params.get('tags')
state = module.params.get('state')
multi = module.params.get('multi_ok')
changed=False
changed = False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
@ -298,7 +300,7 @@ def main():
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
except BotoServerError as e:
e_msg=boto_exception(e)
e_msg = boto_exception(e)
module.fail_json(msg=e_msg)
if not module.check_mode:
@ -306,7 +308,7 @@ def main():
try:
vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
except BotoServerError as e:
e_msg=boto_exception(e)
e_msg = boto_exception(e)
module.fail_json(msg=e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
@ -325,7 +327,7 @@ def main():
except BotoServerError as e:
e_msg = boto_exception(e)
module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
"and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
"and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))

@ -373,7 +373,7 @@ def main():
client = boto3_conn(module, conn_type='client', resource='ec2',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg="Can't authorize connection - "+str(e))
module.fail_json(msg="Can't authorize connection - " + str(e))
if state == 'present':
(changed, results) = create_peer_connection(client, module)

@ -147,6 +147,7 @@ def get_vgw_info(vgws):
return vgw_info
def wait_for_status(client, module, vpn_gateway_id, status):
polling_increment_secs = 15
max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
@ -227,7 +228,7 @@ def delete_vgw(client, module, vpn_gateway_id):
except botocore.exceptions.ClientError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
#return the deleted VpnGatewayId as this is not included in the above response
# return the deleted VpnGatewayId as this is not included in the above response
result = vpn_gateway_id
return result
@ -236,7 +237,7 @@ def create_tags(client, module, vpn_gateway_id):
params = dict()
try:
response = client.create_tags(Resources=[vpn_gateway_id],Tags=load_tags(module))
response = client.create_tags(Resources=[vpn_gateway_id], Tags=load_tags(module))
except botocore.exceptions.ClientError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
@ -280,7 +281,7 @@ def find_tags(client, module, resource_id=None):
try:
response = client.describe_tags(Filters=[
{'Name': 'resource-id', 'Values': [resource_id]}
])
])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
@ -295,7 +296,7 @@ def check_tags(client, module, existing_vgw, vpn_gateway_id):
changed = False
tags_list = {}
#format tags for comparison
# format tags for comparison
for tags in existing_vgw[0]['Tags']:
if tags['Key'] != 'Name':
tags_list[tags['Key']] = tags['Value']
@ -307,7 +308,7 @@ def check_tags(client, module, existing_vgw, vpn_gateway_id):
vgw = find_vgw(client, module)
changed = True
#if no tag args are supplied, delete any existing tags with the exception of the name tag
# if no tag args are supplied, delete any existing tags with the exception of the name tag
if params['Tags'] is None and tags_list != {}:
tags_to_delete = []
for tags in existing_vgw[0]['Tags']:
@ -346,7 +347,7 @@ def find_vgw(client, module, vpn_gateway_id=None):
response = client.describe_vpn_gateways(Filters=[
{'Name': 'type', 'Values': [params['Type']]},
{'Name': 'tag:Name', 'Values': [params['Name']]}
])
])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
@ -362,7 +363,7 @@ def find_vgw(client, module, vpn_gateway_id=None):
response = client.describe_vpn_gateways(Filters=[
{'Name': 'type', 'Values': [params['Type']]},
{'Name': 'tag:Name', 'Values': [params['Name']]}
])
])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
@ -502,7 +503,7 @@ def ensure_vgw_absent(client, module):
deleted_vgw = "Nothing to do"
else:
#Check that a name and type argument has been supplied if no vgw-id
# Check that a name and type argument has been supplied if no vgw-id
if not module.params.get('name') or not module.params.get('type'):
module.fail_json(msg='A name and type is required when no vgw-id and a status of \'absent\' is suppled')
@ -518,7 +519,7 @@ def ensure_vgw_absent(client, module):
# detach the vpc from the vgw
detach_vgw(client, module, vpn_gateway_id, params['VpcId'])
#now that the vpc has been detached, delete the vgw
# now that the vpc has been detached, delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
changed = True
@ -528,7 +529,7 @@ def ensure_vgw_absent(client, module):
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
changed = True
#now that the vpc has been detached, delete the vgw
# now that the vpc has been detached, delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
else:
@ -555,7 +556,7 @@ def main():
wait_timeout=dict(type='int', default=320),
type=dict(default='ipsec.1', choices=['ipsec.1']),
tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
)
)
)
module = AnsibleModule(argument_spec=argument_spec)

@ -107,10 +107,10 @@ from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info
def get_virtual_gateway_info(virtual_gateway):
virtual_gateway_info = {'VpnGatewayId': virtual_gateway['VpnGatewayId'],
'State': virtual_gateway['State'],
'Type': virtual_gateway['Type'],
'VpcAttachments': virtual_gateway['VpcAttachments'],
'Tags': virtual_gateway['Tags']}
'State': virtual_gateway['State'],
'Type': virtual_gateway['Type'],
'VpcAttachments': virtual_gateway['VpcAttachments'],
'Tags': virtual_gateway['Tags']}
return virtual_gateway_info
@ -126,10 +126,10 @@ def list_virtual_gateways(client, module):
try:
all_virtual_gateways = client.describe_vpn_gateways(**params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e),exception=traceback.format_exc())
module.fail_json(msg=str(e), exception=traceback.format_exc())
snaked_vgws = [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw))
for vgw in all_virtual_gateways['VpnGateways']]
for vgw in all_virtual_gateways['VpnGateways']]
module.exit_json(virtual_gateways=snaked_vgws)
@ -138,8 +138,8 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters = dict(type='dict', default=dict()),
vpn_gateway_ids = dict(type='list', default=None)
filters=dict(type='dict', default=dict()),
vpn_gateway_ids=dict(type='list', default=None)
)
)
@ -153,7 +153,7 @@ def main():
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg="Can't authorize connection - "+str(e))
module.fail_json(msg="Can't authorize connection - " + str(e))
# call your function here
results = list_virtual_gateways(connection, module)

@ -119,11 +119,11 @@ BACKEND = default_backend()
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id = dict(required=True),
key_file = dict(required=True, type='path'),
key_passphrase = dict(no_log=True, default=None, required=False),
wait = dict(type='bool', default=False, required=False),
wait_timeout = dict(default=120, required=False),
instance_id=dict(required=True),
key_file=dict(required=True, type='path'),
key_passphrase=dict(no_log=True, default=None, required=False),
wait=dict(type='bool', default=False, required=False),
wait_timeout=dict(default=120, required=False),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@ -158,18 +158,18 @@ def main():
decoded = b64decode(data)
if wait and datetime.datetime.now() >= end:
module.fail_json(msg = "wait for password timeout after %d seconds" % wait_timeout)
module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout)
try:
f = open(key_file, 'rb')
except IOError as e:
module.fail_json(msg = "I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
else:
try:
with f:
key = load_pem_private_key(f.read(), b_key_passphrase, BACKEND)
except (ValueError, TypeError) as e:
module.fail_json(msg = "unable to parse key file")
module.fail_json(msg="unable to parse key file")
try:
decrypted = key.decrypt(decoded, PKCS1v15())

@ -144,34 +144,35 @@ class EcsClusterManager:
response = self.ecs.describe_clusters(clusters=[
cluster_name
])
if len(response['failures'])>0:
if len(response['failures']) > 0:
c = self.find_in_array(response['failures'], cluster_name, 'arn')
if c and c['reason']=='MISSING':
if c and c['reason'] == 'MISSING':
return None
# fall thru and look through found ones
if len(response['clusters'])>0:
if len(response['clusters']) > 0:
c = self.find_in_array(response['clusters'], cluster_name)
if c:
return c
raise Exception("Unknown problem describing cluster %s." % cluster_name)
def create_cluster(self, clusterName = 'default'):
def create_cluster(self, clusterName='default'):
response = self.ecs.create_cluster(clusterName=clusterName)
return response['cluster']
def delete_cluster(self, clusterName):
return self.ecs.delete_cluster(cluster=clusterName)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent', 'has_instances'] ),
name=dict(required=True, type='str' ),
state=dict(required=True, choices=['present', 'absent', 'has_instances']),
name=dict(required=True, type='str'),
delay=dict(required=False, type='int', default=10),
repeat=dict(required=False, type='int', default=10)
))
required_together = ( ['state', 'name'] )
required_together = (['state', 'name'])
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)
@ -185,12 +186,12 @@ def main():
try:
existing = cluster_mgr.describe_cluster(module.params['name'])
except Exception as e:
module.fail_json(msg="Exception describing cluster '"+module.params['name']+"': "+str(e))
module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))
results = dict(changed=False)
if module.params['state'] == 'present':
if existing and 'status' in existing and existing['status']=="ACTIVE":
results['cluster']=existing
if existing and 'status' in existing and existing['status'] == "ACTIVE":
results['cluster'] = existing
else:
if not module.check_mode:
# doesn't exist. create it.
@ -205,7 +206,7 @@ def main():
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
results['cluster'] = existing
if 'status' in existing and existing['status']=="INACTIVE":
if 'status' in existing and existing['status'] == "INACTIVE":
results['changed'] = False
else:
if not module.check_mode:
@ -213,7 +214,7 @@ def main():
results['changed'] = True
elif module.params['state'] == 'has_instances':
if not existing:
module.fail_json(msg="Cluster '"+module.params['name']+" not found.")
module.fail_json(msg="Cluster '" + module.params['name'] + " not found.")
return
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
@ -228,8 +229,8 @@ def main():
results['changed'] = True
break
time.sleep(delay)
if count == 0 and i is repeat-1:
module.fail_json(msg="Cluster instance count still zero after "+str(repeat)+" tries of "+str(delay)+" seconds each.")
if count == 0 and i is repeat - 1:
module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
return
module.exit_json(**results)

@ -308,13 +308,13 @@ class EcsServiceManager:
cluster=cluster_name,
services=[service_name])
msg = ''
if len(response['failures'])>0:
if len(response['failures']) > 0:
c = self.find_in_array(response['failures'], service_name, 'arn')
msg += ", failure reason is " + c['reason']
if c and c['reason']=='MISSING':
if c and c['reason'] == 'MISSING':
return None
# fall thru and look through found ones
if len(response['services'])>0:
if len(response['services']) > 0:
c = self.find_in_array(response['services'], service_name)
if c:
return c
@ -426,7 +426,7 @@ def main():
matching = False
update = False
if existing and 'status' in existing and existing['status']=="ACTIVE":
if existing and 'status' in existing and existing['status'] == "ACTIVE":
if service_mgr.is_matching_service(module.params, existing):
matching = True
results['service'] = service_mgr.jsonize(existing)
@ -446,25 +446,25 @@ def main():
if update:
# update required
response = service_mgr.update_service(module.params['name'],
module.params['cluster'],
module.params['task_definition'],
loadBalancers,
module.params['desired_count'],
clientToken,
role,
deploymentConfiguration)
module.params['cluster'],
module.params['task_definition'],
loadBalancers,
module.params['desired_count'],
clientToken,
role,
deploymentConfiguration)
else:
# doesn't exist. create it.
response = service_mgr.create_service(module.params['name'],
module.params['cluster'],
module.params['task_definition'],
loadBalancers,
module.params['desired_count'],
clientToken,
role,
deploymentConfiguration,
module.params['placement_constraints'],
module.params['placement_strategy'])
module.params['cluster'],
module.params['task_definition'],
loadBalancers,
module.params['desired_count'],
clientToken,
role,
deploymentConfiguration,
module.params['placement_constraints'],
module.params['placement_strategy'])
results['service'] = response
@ -479,7 +479,7 @@ def main():
del existing['deployments']
del existing['events']
results['ansible_facts'] = existing
if 'status' in existing and existing['status']=="INACTIVE":
if 'status' in existing and existing['status'] == "INACTIVE":
results['changed'] = False
else:
if not module.check_mode:

@ -170,17 +170,17 @@ class EcsServiceManager:
if cluster and cluster is not None:
fn_args['cluster'] = cluster
response = self.ecs.list_services(**fn_args)
relevant_response = dict(services = response['serviceArns'])
relevant_response = dict(services=response['serviceArns'])
return relevant_response
def describe_services(self, cluster, services):
fn_args = dict()
if cluster and cluster is not None:
fn_args['cluster'] = cluster
fn_args['services']=services.split(",")
fn_args['services'] = services.split(",")
response = self.ecs.describe_services(**fn_args)
relevant_response = dict(services = map(self.extract_service_from, response['services']))
if 'failures' in response and len(response['failures'])>0:
relevant_response = dict(services=map(self.extract_service_from, response['services']))
if 'failures' in response and len(response['failures']) > 0:
relevant_response['services_not_running'] = response['failures']
return relevant_response
@ -199,13 +199,14 @@ class EcsServiceManager:
e['createdAt'] = str(e['createdAt'])
return service
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
details=dict(required=False, type='bool', default=False ),
cluster=dict(required=False, type='str' ),
service=dict(required=False, type='str' )
details=dict(required=False, type='bool', default=False),
cluster=dict(required=False, type='str'),
service=dict(required=False, type='str')
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

@ -186,7 +186,7 @@ class EcsExecManager:
family=service_name,
desiredStatus=status
)
if len(response['taskArns'])>0:
if len(response['taskArns']) > 0:
for c in response['taskArns']:
if c.endswith(service_name):
return c
@ -209,13 +209,13 @@ class EcsExecManager:
if cluster:
args['cluster'] = cluster
if task_definition:
args['taskDefinition']=task_definition
args['taskDefinition'] = task_definition
if overrides:
args['overrides']=overrides
args['overrides'] = overrides
if container_instances:
args['containerInstances']=container_instances
args['containerInstances'] = container_instances
if startedBy:
args['startedBy']=startedBy
args['startedBy'] = startedBy
response = self.ecs.start_task(**args)
# include tasks and failures
return response['tasks']
@ -224,17 +224,18 @@ class EcsExecManager:
response = self.ecs.stop_task(cluster=cluster, task=task)
return response['task']
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
operation=dict(required=True, choices=['run', 'start', 'stop'] ),
cluster=dict(required=False, type='str' ), # R S P
task_definition=dict(required=False, type='str' ), # R* S*
overrides=dict(required=False, type='dict'), # R S
count=dict(required=False, type='int' ), # R
task=dict(required=False, type='str' ), # P*
container_instances=dict(required=False, type='list'), # S*
started_by=dict(required=False, type='str' ) # R S
operation=dict(required=True, choices=['run', 'start', 'stop']),
cluster=dict(required=False, type='str'), # R S P
task_definition=dict(required=False, type='str'), # R* S*
overrides=dict(required=False, type='dict'), # R S
count=dict(required=False, type='int'), # R
task=dict(required=False, type='str'), # P*
container_instances=dict(required=False, type='list'), # S*
started_by=dict(required=False, type='str') # R S
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
@ -276,7 +277,7 @@ def main():
if module.params['operation'] == 'run':
if existing:
# TBD - validate the rest of the details
results['task']=existing
results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.run_task(
@ -290,7 +291,7 @@ def main():
elif module.params['operation'] == 'start':
if existing:
# TBD - validate the rest of the details
results['task']=existing
results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.start_task(
@ -304,7 +305,7 @@ def main():
elif module.params['operation'] == 'stop':
if existing:
results['task']=existing
results['task'] = existing
else:
if not module.check_mode:
# it exists, so we should delete it and mark changed.

@ -131,6 +131,7 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils._text import to_text
class EcsTaskManager:
"""Handles ECS Tasks"""
@ -183,7 +184,7 @@ class EcsTaskManager:
def describe_task_definitions(self, family):
data = {
"taskDefinitionArns": [],
"nextToken": None
"nextToken": None
}
def fetch():
@ -371,7 +372,7 @@ def main():
if 'arn' in module.params and module.params['arn'] is not None:
task_to_describe = module.params['arn']
elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \
module.params['revision'] is not None:
module.params['revision'] is not None:
task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
else:
module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")

@ -76,10 +76,10 @@ from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, get_aws_connec
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(required=True, choices=['present', 'absent']),
name = dict(required=True),
description = dict(required=False),
subnets = dict(required=False, type='list'),
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True),
description=dict(required=False),
subnets=dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@ -87,26 +87,25 @@ def main():
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_description = module.params.get('description')
group_subnets = module.params.get('subnets') or {}
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_description = module.params.get('description')
group_subnets = module.params.get('subnets') or {}
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
module.fail_json(msg = str("Parameter %s required for state='present'" % required))
module.fail_json(msg=str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if not region:
module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
"""Get an elasticache connection"""
try:
@ -123,7 +122,7 @@ def main():
exists = len(matching_groups) > 0
except BotoServerError as e:
if e.error_code != 'CacheSubnetGroupNotFoundFault':
module.fail_json(msg = e.error_message)
module.fail_json(msg=e.error_message)
if state == 'absent':
if exists:
@ -139,7 +138,7 @@ def main():
except BotoServerError as e:
if e.error_message != 'No modifications were requested.':
module.fail_json(msg = e.error_message)
module.fail_json(msg=e.error_message)
else:
changed = False

@ -657,7 +657,6 @@ def compare_listeners(connection, module, current_listeners, new_listeners, purg
def compare_rules(connection, module, current_listeners, listener):
"""
Compare rules and return rules to add, rules to modify and rules to remove
Rules are compared based on priority

@ -425,6 +425,7 @@ def _throttleable_operation(max_retries):
return _do_op
return _operation_wrapper
def _get_vpc_connection(module, region, aws_connect_params):
try:
return connect_to_aws(boto.vpc, region, **aws_connect_params)
@ -434,6 +435,7 @@ def _get_vpc_connection(module, region, aws_connect_params):
_THROTTLING_RETRIES = 5
class ElbManager(object):
"""Handles ELB creation and destruction"""
@ -579,10 +581,10 @@ class ElbManager(object):
# status of instances behind the ELB
if info['instances']:
info['instance_health'] = [ dict(
instance_id = instance_state.instance_id,
reason_code = instance_state.reason_code,
state = instance_state.state
info['instance_health'] = [dict(
instance_id=instance_state.instance_id,
reason_code=instance_state.reason_code,
state=instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
@ -663,7 +665,7 @@ class ElbManager(object):
elb_interfaces = self.ec2_conn.get_all_network_interfaces(
filters={'attachment.instance-owner-id': 'amazon-elb',
'description': 'ELB {0}'.format(self.name) })
'description': 'ELB {0}'.format(self.name)})
for x in range(0, max_retries):
for interface in elb_interfaces:
@ -888,13 +890,13 @@ class ElbManager(object):
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
@ -962,7 +964,7 @@ class ElbManager(object):
"enabled": True,
"s3_bucket_name": self.access_logs['s3_location'],
"s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
"emit_interval": self.access_logs.get('interval', 60),
"emit_interval": self.access_logs.get('interval', 60),
}
update_access_logs_config = False
@ -1002,10 +1004,10 @@ class ElbManager(object):
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
def _create_policy(self, policy_param, policy_meth, policy):
getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy)
getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
@ -1223,7 +1225,7 @@ class ElbManager(object):
params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
self.elb_conn.make_request('AddTags', params)
self.changed=True
self.changed = True
# Remove extra tags
dictact = dict(set(tagdict.items()) - set(self.tags.items()))
@ -1232,7 +1234,7 @@ class ElbManager(object):
params['Tags.member.%d.Key' % (i + 1)] = key
self.elb_conn.make_request('RemoveTags', params)
self.changed=True
self.changed = True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
@ -1275,7 +1277,7 @@ def main():
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['security_group_ids', 'security_group_names']]
mutually_exclusive=[['security_group_ids', 'security_group_names']]
)
if not HAS_BOTO:
@ -1321,7 +1323,7 @@ def main():
security_group_ids = []
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
if subnets: # We have at least one subnet, ergo this is a VPC
if subnets: # We have at least one subnet, ergo this is a VPC
vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
filters = {'vpc_id': vpc_id}
@ -1333,10 +1335,10 @@ def main():
if isinstance(group_name, string_types):
group_name = [group_name]
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
security_group_ids.extend(group_id)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
module.fail_json(msg=str(e))
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,

@ -167,7 +167,7 @@ class ElbManager:
found = False
for lb in self.lbs:
if lb.name == lbtest:
found=True
found = True
break
return found
@ -326,7 +326,7 @@ def main():
argument_spec.update(dict(
state={'required': True},
instance_id={'required': True},
ec2_elbs={'default': None, 'required': False, 'type':'list'},
ec2_elbs={'default': None, 'required': False, 'type': 'list'},
enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
wait={'required': False, 'default': True, 'type': 'bool'},
wait_timeout={'required': False, 'default': 0, 'type': 'int'}
@ -359,7 +359,7 @@ def main():
if ec2_elbs is not None:
for elb in ec2_elbs:
if not elb_man.exists(elb):
msg="ELB %s does not exist" % elb
msg = "ELB %s does not exist" % elb
module.fail_json(msg=msg)
if module.params['state'] == 'present':

@ -153,13 +153,13 @@ from ansible.module_utils._text import to_native
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name = dict(),
function_arn = dict(),
wait = dict(default=True, type='bool'),
tail_log = dict(default=False, type='bool'),
dry_run = dict(default=False, type='bool'),
version_qualifier = dict(),
payload = dict(default={}, type='dict'),
name=dict(),
function_arn=dict(),
wait=dict(default=True, type='bool'),
tail_log=dict(default=False, type='bool'),
dry_run=dict(default=False, type='bool'),
version_qualifier=dict(),
payload=dict(default={}, type='dict'),
))
module = AnsibleModule(
argument_spec=argument_spec,
@ -172,13 +172,13 @@ def main():
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
name = module.params.get('name')
function_arn = module.params.get('function_arn')
await_return = module.params.get('wait')
dry_run = module.params.get('dry_run')
tail_log = module.params.get('tail_log')
version_qualifier = module.params.get('version_qualifier')
payload = module.params.get('payload')
name = module.params.get('name')
function_arn = module.params.get('function_arn')
await_return = module.params.get('wait')
dry_run = module.params.get('dry_run')
tail_log = module.params.get('tail_log')
version_qualifier = module.params.get('version_qualifier')
payload = module.params.get('payload')
if not HAS_BOTO3:
module.fail_json(msg='Python module "boto3" is missing, please install it')
@ -247,7 +247,7 @@ def main():
module.fail_json(msg="Unexpected failure while invoking Lambda function",
exception=traceback.format_exc())
results ={
results = {
'logs': '',
'status': response['StatusCode'],
'output': '',
@ -276,7 +276,7 @@ def main():
# format the stacktrace sent back as an array into a multiline string
'trace': '\n'.join(
[' '.join([
str(x) for x in line # cast line numbers to strings
str(x) for x in line # cast line numbers to strings
]) for line in results.get('output', {}).get('stackTrace', [])]
),
'errmsg': results['output'].get('errorMessage'),

@ -240,8 +240,8 @@ def create_user(module, iam, name, pwd, path, key_state, key_count):
if key_count:
while key_count > key_qty:
keys.append(iam.create_access_key(
user_name=name).create_access_key_response.\
create_access_key_result.\
user_name=name).create_access_key_response.
create_access_key_result.
access_key)
key_qty += 1
else:
@ -258,7 +258,7 @@ def delete_dependencies_first(module, iam, name):
# try to delete any keys
try:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
for key in current_keys:
iam.delete_access_key(key, name)
changed = True
@ -447,7 +447,7 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key
def set_users_groups(module, iam, name, groups, updated=None,
new_name=None):
new_name=None):
""" Sets groups for a user, will purge groups not explicitly passed, while
retaining pre-existing groups that also are in the new list.
"""
@ -526,6 +526,7 @@ def delete_group(module=None, iam=None, name=None):
changed = True
return changed, name
def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
changed = False
try:
@ -554,12 +555,12 @@ def create_role(module, iam, name, path, role_list, prof_list, trust_policy_doc)
if name not in role_list:
changed = True
iam_role_result = iam.create_role(name,
assume_role_policy_document=trust_policy_doc,
path=path).create_role_response.create_role_result.role
assume_role_policy_document=trust_policy_doc,
path=path).create_role_response.create_role_result.role
if name not in prof_list:
instance_profile_result = iam.create_instance_profile(name,
path=path).create_instance_profile_response.create_instance_profile_result.instance_profile
path=path).create_instance_profile_response.create_instance_profile_result.instance_profile
iam.add_role_to_instance_profile(name, name)
else:
instance_profile_result = iam.get_instance_profile(name).get_instance_profile_response.get_instance_profile_result.instance_profile
@ -685,7 +686,7 @@ def main():
if iam_type == 'role' and state == 'update':
module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
"please specify present or absent")
"please specify present or absent")
# check if trust_policy is present -- it can be inline JSON or a file path to a JSON file
if trust_policy_filepath:
@ -865,7 +866,7 @@ def main():
module.fail_json(
changed=False, msg='Role update not currently supported by boto.')
module.exit_json(changed=changed, roles=role_list, role_result=role_result,
instance_profile_result=instance_profile_result)
instance_profile_result=instance_profile_result)
if __name__ == '__main__':

@ -138,15 +138,15 @@ def user_action(module, iam, name, policy_name, skip, pdoc, state):
changed = False
try:
current_policies = [cp for cp in iam.get_all_user_policies(name).
list_user_policies_result.
policy_names]
list_user_policies_result.
policy_names]
matching_policies = []
for pol in current_policies:
'''
urllib is needed here because boto returns url encoded strings instead
'''
if urllib.parse.unquote(iam.get_user_policy(name, pol).
get_user_policy_result.policy_document) == pdoc:
get_user_policy_result.policy_document) == pdoc:
policy_match = True
matching_policies.append(pol)
@ -168,8 +168,8 @@ def user_action(module, iam, name, policy_name, skip, pdoc, state):
module.exit_json(changed=changed, msg="%s policy is already absent" % policy_name)
updated_policies = [cp for cp in iam.get_all_user_policies(name).
list_user_policies_result.
policy_names]
list_user_policies_result.
policy_names]
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
module.fail_json(changed=changed, msg=error_msg)
@ -182,8 +182,8 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state):
changed = False
try:
current_policies = [cp for cp in iam.list_role_policies(name).
list_role_policies_result.
policy_names]
list_role_policies_result.
policy_names]
except boto.exception.BotoServerError as e:
if e.error_code == "NoSuchEntity":
# Role doesn't exist so it's safe to assume the policy doesn't either
@ -195,7 +195,7 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state):
matching_policies = []
for pol in current_policies:
if urllib.parse.unquote(iam.get_role_policy(name, pol).
get_role_policy_result.policy_document) == pdoc:
get_role_policy_result.policy_document) == pdoc:
policy_match = True
matching_policies.append(pol)
@ -220,8 +220,8 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state):
module.fail_json(msg=err.message)
updated_policies = [cp for cp in iam.list_role_policies(name).
list_role_policies_result.
policy_names]
list_role_policies_result.
policy_names]
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
module.fail_json(changed=changed, msg=error_msg)
@ -232,19 +232,19 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state):
def group_action(module, iam, name, policy_name, skip, pdoc, state):
policy_match = False
changed = False
msg=''
msg = ''
try:
current_policies = [cp for cp in iam.get_all_group_policies(name).
list_group_policies_result.
policy_names]
list_group_policies_result.
policy_names]
matching_policies = []
for pol in current_policies:
if urllib.parse.unquote(iam.get_group_policy(name, pol).
get_group_policy_result.policy_document) == pdoc:
get_group_policy_result.policy_document) == pdoc:
policy_match = True
matching_policies.append(pol)
msg=("The policy document you specified already exists "
"under the name %s." % pol)
msg = ("The policy document you specified already exists "
"under the name %s." % pol)
if state == 'present':
# If policy document does not already exist (either it's changed
# or the policy is not present) or if we're not skipping dupes then
@ -264,8 +264,8 @@ def group_action(module, iam, name, policy_name, skip, pdoc, state):
msg="%s policy is already absent" % policy_name)
updated_policies = [cp for cp in iam.get_all_group_policies(name).
list_group_policies_result.
policy_names]
list_group_policies_result.
policy_names]
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
module.fail_json(changed=changed, msg=error_msg)
@ -317,7 +317,7 @@ def main():
except Exception as e:
module.fail_json(msg='Failed to convert the policy into valid JSON: %s' % str(e))
else:
pdoc=None
pdoc = None
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
@ -343,8 +343,8 @@ def main():
module.exit_json(changed=changed, role_name=name, policies=current_policies)
elif iam_type == 'group':
changed, group_name, current_policies, msg = group_action(module, iam, name,
policy_name, skip, pdoc,
state)
policy_name, skip, pdoc,
state)
module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg)

@ -80,10 +80,10 @@ from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(required=True, choices=['present', 'absent']),
name = dict(required=True),
description = dict(required=False),
subnets = dict(required=False, type='list'),
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True),
description=dict(required=False),
subnets=dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@ -91,30 +91,30 @@ def main():
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_description = module.params.get('description')
group_subnets = module.params.get('subnets') or {}
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_description = module.params.get('description')
group_subnets = module.params.get('subnets') or {}
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
module.fail_json(msg = str("Parameter %s required for state='present'" % required))
module.fail_json(msg=str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if not region:
module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
try:
conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs)
except BotoServerError as e:
module.fail_json(msg = e.error_message)
module.fail_json(msg=e.error_message)
try:
changed = False
@ -125,7 +125,7 @@ def main():
exists = len(matching_groups) > 0
except BotoServerError as e:
if e.error_code != 'DBSubnetGroupNotFoundFault':
module.fail_json(msg = e.error_message)
module.fail_json(msg=e.error_message)
if state == 'absent':
if exists:
@ -145,7 +145,7 @@ def main():
changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
changed = True
except BotoServerError as e:
module.fail_json(msg = e.error_message)
module.fail_json(msg=e.error_message)
module.exit_json(changed=changed)

@ -237,15 +237,15 @@ from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec
def _collect_facts(resource):
"""Transfrom cluster information to dict."""
facts = {
'identifier' : resource['ClusterIdentifier'],
'create_time' : resource['ClusterCreateTime'],
'status' : resource['ClusterStatus'],
'username' : resource['MasterUsername'],
'db_name' : resource['DBName'],
'availability_zone' : resource['AvailabilityZone'],
'identifier': resource['ClusterIdentifier'],
'create_time': resource['ClusterCreateTime'],
'status': resource['ClusterStatus'],
'username': resource['MasterUsername'],
'db_name': resource['DBName'],
'availability_zone': resource['AvailabilityZone'],
'maintenance_window': resource['PreferredMaintenanceWindow'],
'url' : resource['Endpoint']['Address'],
'port' : resource['Endpoint']['Port']
'url': resource['Endpoint']['Address'],
'port': resource['Endpoint']['Port']
}
for node in resource['ClusterNodes']:
@ -267,11 +267,11 @@ def create_cluster(module, redshift):
Returns:
"""
identifier = module.params.get('identifier')
node_type = module.params.get('node_type')
username = module.params.get('username')
password = module.params.get('password')
wait = module.params.get('wait')
identifier = module.params.get('identifier')
node_type = module.params.get('node_type')
username = module.params.get('username')
password = module.params.get('password')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
changed = True
@ -286,7 +286,7 @@ def create_cluster(module, redshift):
'number_of_nodes', 'publicly_accessible',
'encrypted', 'elastic_ip'):
if p in module.params:
params[ p ] = module.params.get( p )
params[p] = module.params.get(p)
try:
redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
@ -310,7 +310,7 @@ def create_cluster(module, redshift):
while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
@ -368,7 +368,7 @@ def delete_cluster(module, redshift):
while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
@ -386,8 +386,8 @@ def modify_cluster(module, redshift):
redshift: authenticated redshift connection object
"""
identifier = module.params.get('identifier')
wait = module.params.get('wait')
identifier = module.params.get('identifier')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
# Package up the optional parameters
@ -422,7 +422,7 @@ def modify_cluster(module, redshift):
while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
@ -436,34 +436,34 @@ def modify_cluster(module, redshift):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
identifier = dict(required=True),
node_type = dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large',
'dc2.large','dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large',
'dw2.8xlarge'], required=False),
username = dict(required=False),
password = dict(no_log=True, required=False),
db_name = dict(require=False),
cluster_type = dict(choices=['multi-node', 'single-node', ], default='single-node'),
cluster_security_groups = dict(aliases=['security_groups'], type='list'),
vpc_security_group_ids = dict(aliases=['vpc_security_groups'], type='list'),
skip_final_cluster_snapshot = dict(aliases=['skip_final_snapshot'], type='bool', default=False),
final_cluster_snapshot_identifier = dict(aliases=['final_snapshot_id'], required=False),
cluster_subnet_group_name = dict(aliases=['subnet']),
availability_zone = dict(aliases=['aws_zone', 'zone']),
preferred_maintenance_window = dict(aliases=['maintance_window', 'maint_window']),
cluster_parameter_group_name = dict(aliases=['param_group_name']),
automated_snapshot_retention_period = dict(aliases=['retention_period']),
port = dict(type='int'),
cluster_version = dict(aliases=['version'], choices=['1.0']),
allow_version_upgrade = dict(aliases=['version_upgrade'], type='bool', default=True),
number_of_nodes = dict(type='int'),
publicly_accessible = dict(type='bool', default=False),
encrypted = dict(type='bool', default=False),
elastic_ip = dict(required=False),
new_cluster_identifier = dict(aliases=['new_identifier']),
wait = dict(type='bool', default=False),
wait_timeout = dict(type='int', default=300),
command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
identifier=dict(required=True),
node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large',
'dc2.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large',
'dw2.8xlarge'], required=False),
username=dict(required=False),
password=dict(no_log=True, required=False),
db_name=dict(require=False),
cluster_type=dict(choices=['multi-node', 'single-node', ], default='single-node'),
cluster_security_groups=dict(aliases=['security_groups'], type='list'),
vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'),
skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'], type='bool', default=False),
final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
cluster_subnet_group_name=dict(aliases=['subnet']),
availability_zone=dict(aliases=['aws_zone', 'zone']),
preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
cluster_parameter_group_name=dict(aliases=['param_group_name']),
automated_snapshot_retention_period=dict(aliases=['retention_period']),
port=dict(type='int'),
cluster_version=dict(aliases=['version'], choices=['1.0']),
allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
number_of_nodes=dict(type='int'),
publicly_accessible=dict(type='bool', default=False),
encrypted=dict(type='bool', default=False),
elastic_ip=dict(required=False),
new_cluster_identifier=dict(aliases=['new_identifier']),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
))
required_if = [

@ -166,6 +166,7 @@ def find_health_check(conn, wanted):
return check
return None
def to_health_check(config):
return HealthCheck(
config.get('IPAddress'),
@ -178,6 +179,7 @@ def to_health_check(config):
failure_threshold=int(config.get('FailureThreshold')),
)
def health_check_diff(a, b):
a = a.__dict__
b = b.__dict__
@ -189,6 +191,7 @@ def health_check_diff(a, b):
diff[key] = b.get(key)
return diff
def to_template_params(health_check):
params = {
'ip_addr_part': '',
@ -240,7 +243,8 @@ UPDATEHCXMLBody = """
</UpdateHealthCheckRequest>
"""
def create_health_check(conn, health_check, caller_ref = None):
def create_health_check(conn, health_check, caller_ref=None):
if caller_ref is None:
caller_ref = str(uuid.uuid4())
uri = '/%s/healthcheck' % conn.Version
@ -259,6 +263,7 @@ def create_health_check(conn, health_check, caller_ref = None):
else:
raise exception.DNSServerError(response.status, response.reason, body)
def update_health_check(conn, health_check_id, health_check_version, health_check):
uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
params = to_template_params(health_check)
@ -279,18 +284,19 @@ def update_health_check(conn, health_check_id, health_check_version, health_chec
h.parse(body)
return e
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(choices=['present', 'absent'], default='present'),
ip_address = dict(),
port = dict(type='int'),
type = dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
resource_path = dict(),
fqdn = dict(),
string_match = dict(),
request_interval = dict(type='int', choices=[10, 30], default=30),
failure_threshold = dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
state=dict(choices=['present', 'absent'], default='present'),
ip_address=dict(),
port=dict(type='int'),
type=dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
resource_path=dict(),
fqdn=dict(),
string_match=dict(),
request_interval=dict(type='int', choices=[10, 30], default=30),
failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@ -298,15 +304,15 @@ def main():
if not HAS_BOTO:
module.fail_json(msg='boto 2.27.0+ required for this module')
state_in = module.params.get('state')
ip_addr_in = module.params.get('ip_address')
port_in = module.params.get('port')
type_in = module.params.get('type')
resource_path_in = module.params.get('resource_path')
fqdn_in = module.params.get('fqdn')
string_match_in = module.params.get('string_match')
request_interval_in = module.params.get('request_interval')
failure_threshold_in = module.params.get('failure_threshold')
state_in = module.params.get('state')
ip_addr_in = module.params.get('ip_address')
port_in = module.params.get('port')
type_in = module.params.get('type')
resource_path_in = module.params.get('resource_path')
fqdn_in = module.params.get('fqdn')
string_match_in = module.params.get('string_match')
request_interval_in = module.params.get('request_interval')
failure_threshold_in = module.params.get('failure_threshold')
if ip_addr_in is None and fqdn_in is None:
module.fail_json(msg="parameter 'ip_address' or 'fqdn' is required")
@ -334,7 +340,7 @@ def main():
try:
conn = Route53Connection(**aws_connect_kwargs)
except boto.exception.BotoServerError as e:
module.fail_json(msg = e.error_message)
module.fail_json(msg=e.error_message)
changed = False
action = None
@ -362,7 +368,7 @@ def main():
conn.delete_health_check(check_id)
changed = True
else:
module.fail_json(msg = "Logic Error: Unknown state")
module.fail_json(msg="Logic Error: Unknown state")
module.exit_json(changed=changed, health_check=dict(id=check_id), action=action)

@ -256,6 +256,7 @@ def create_lifecycle_rule(connection, module):
module.exit_json(changed=changed)
def compare_rule(rule_a, rule_b):
# Copy objects
@ -364,27 +365,27 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name = dict(required=True, type='str'),
expiration_days = dict(default=None, required=False, type='int'),
expiration_date = dict(default=None, required=False, type='str'),
prefix = dict(default=None, required=False),
requester_pays = dict(default='no', type='bool'),
rule_id = dict(required=False, type='str'),
state = dict(default='present', choices=['present', 'absent']),
status = dict(default='enabled', choices=['enabled', 'disabled']),
storage_class = dict(default='glacier', type='str', choices=['glacier', 'standard_ia']),
transition_days = dict(default=None, required=False, type='int'),
transition_date = dict(default=None, required=False, type='str')
name=dict(required=True, type='str'),
expiration_days=dict(default=None, required=False, type='int'),
expiration_date=dict(default=None, required=False, type='str'),
prefix=dict(default=None, required=False),
requester_pays=dict(default='no', type='bool'),
rule_id=dict(required=False, type='str'),
state=dict(default='present', choices=['present', 'absent']),
status=dict(default='enabled', choices=['enabled', 'disabled']),
storage_class=dict(default='glacier', type='str', choices=['glacier', 'standard_ia']),
transition_days=dict(default=None, required=False, type='int'),
transition_date=dict(default=None, required=False, type='str')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive = [
[ 'expiration_days', 'expiration_date' ],
[ 'expiration_days', 'transition_date' ],
[ 'transition_days', 'transition_date' ],
[ 'transition_days', 'expiration_date' ]
]
mutually_exclusive=[
['expiration_days', 'expiration_date'],
['expiration_days', 'transition_date'],
['transition_days', 'transition_date'],
['transition_days', 'expiration_date']
]
)
if not HAS_BOTO:
@ -428,7 +429,7 @@ def main():
except ValueError as e:
module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")
boto_required_version = (2,40,0)
boto_required_version = (2, 40, 0)
if storage_class == 'standard_ia' and tuple(map(int, (boto.__version__.split(".")))) < boto_required_version:
module.fail_json(msg="'standard_ia' class requires boto >= 2.40.0")

@ -137,10 +137,10 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name = dict(required=True),
target_bucket = dict(required=False, default=None),
target_prefix = dict(required=False, default=""),
state = dict(required=False, default='present', choices=['present', 'absent'])
name=dict(required=True),
target_bucket=dict(required=False, default=None),
target_prefix=dict(required=False, default=""),
state=dict(required=False, default='present', choices=['present', 'absent'])
)
)

@ -256,7 +256,6 @@ DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024
def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):
"""
calculates a multipart upload etag for amazon s3

@ -180,10 +180,10 @@ def _create_website_configuration(suffix, error_key, redirect_all_requests):
website_configuration = {}
if error_key is not None:
website_configuration['ErrorDocument'] = { 'Key': error_key }
website_configuration['ErrorDocument'] = {'Key': error_key}
if suffix is not None:
website_configuration['IndexDocument'] = { 'Suffix': suffix }
website_configuration['IndexDocument'] = {'Suffix': suffix}
if redirect_all_requests is not None:
website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)
@ -288,10 +288,10 @@ def main():
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [
mutually_exclusive=[
['redirect_all_requests', 'suffix'],
['redirect_all_requests', 'error_key']
])
])
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')

@ -176,7 +176,7 @@ class SnsTopicManager(object):
def _get_boto_connection(self):
try:
return connect_to_aws(boto.sns, self.region,
**self.aws_connect_params)
**self.aws_connect_params)
except BotoServerError as err:
self.module.fail_json(msg=err.message)
@ -194,7 +194,6 @@ class SnsTopicManager(object):
break
return [t['TopicArn'] for t in topics]
def _arn_topic_lookup(self):
# topic names cannot have colons, so this captures the full topic name
all_topics = self._get_all_topics()
@ -203,7 +202,6 @@ class SnsTopicManager(object):
if topic.endswith(lookup_topic):
return topic
def _create_topic(self):
self.changed = True
self.topic_created = True
@ -214,57 +212,51 @@ class SnsTopicManager(object):
time.sleep(3)
self.arn_topic = self._arn_topic_lookup()
def _set_topic_attrs(self):
topic_attributes = self.connection.get_topic_attributes(self.arn_topic) \
['GetTopicAttributesResponse'] ['GetTopicAttributesResult'] \
['Attributes']
topic_attributes = self.connection.get_topic_attributes(self.arn_topic)['GetTopicAttributesResponse']['GetTopicAttributesResult']['Attributes']
if self.display_name and self.display_name != topic_attributes['DisplayName']:
self.changed = True
self.attributes_set.append('display_name')
if not self.check_mode:
self.connection.set_topic_attributes(self.arn_topic, 'DisplayName',
self.display_name)
self.display_name)
if self.policy and self.policy != json.loads(topic_attributes['Policy']):
self.changed = True
self.attributes_set.append('policy')
if not self.check_mode:
self.connection.set_topic_attributes(self.arn_topic, 'Policy',
json.dumps(self.policy))
json.dumps(self.policy))
if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or \
self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
self.changed = True
self.attributes_set.append('delivery_policy')
if not self.check_mode:
self.connection.set_topic_attributes(self.arn_topic, 'DeliveryPolicy',
json.dumps(self.delivery_policy))
json.dumps(self.delivery_policy))
def _canonicalize_endpoint(self, protocol, endpoint):
if protocol == 'sms':
return re.sub('[^0-9]*', '', endpoint)
return endpoint
def _get_topic_subs(self):
next_token = None
while True:
response = self.connection.get_all_subscriptions_by_topic(self.arn_topic, next_token)
self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse'] \
['ListSubscriptionsByTopicResult']['Subscriptions'])
next_token = response['ListSubscriptionsByTopicResponse'] \
['ListSubscriptionsByTopicResult']['NextToken']
self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse']
['ListSubscriptionsByTopicResult']['Subscriptions'])
next_token = response['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['NextToken']
if not next_token:
break
def _set_topic_subs(self):
subscriptions_existing_list = []
desired_subscriptions = [(sub['protocol'],
self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
self.subscriptions]
self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
self.subscriptions]
if self.subscriptions_existing:
for sub in self.subscriptions_existing:
@ -284,7 +276,6 @@ class SnsTopicManager(object):
if not self.check_mode:
self.connection.subscribe(self.arn_topic, protocol, endpoint)
def _delete_subscriptions(self):
# NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
# https://forums.aws.amazon.com/thread.jspa?threadID=85993
@ -295,14 +286,12 @@ class SnsTopicManager(object):
if not self.check_mode:
self.connection.unsubscribe(sub['SubscriptionArn'])
def _delete_topic(self):
self.topic_deleted = True
self.changed = True
if not self.check_mode:
self.connection.delete_topic(self.arn_topic)
def ensure_ok(self):
self.arn_topic = self._arn_topic_lookup()
if not self.arn_topic:
@ -319,7 +308,6 @@ class SnsTopicManager(object):
self._delete_subscriptions()
self._delete_topic()
def get_info(self):
info = {
'name': self.name,
@ -341,14 +329,13 @@ class SnsTopicManager(object):
return info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present',
'absent']),
'absent']),
display_name=dict(type='str', required=False),
policy=dict(type='dict', required=False),
delivery_policy=dict(type='dict', required=False),

@ -113,17 +113,18 @@ def assume_role_policy(connection, module):
module.exit_json(changed=changed, sts_creds=assumed_role.credentials.__dict__, sts_user=assumed_role.user.__dict__)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
role_arn = dict(required=True, default=None),
role_session_name = dict(required=True, default=None),
duration_seconds = dict(required=False, default=None, type='int'),
external_id = dict(required=False, default=None),
policy = dict(required=False, default=None),
mfa_serial_number = dict(required=False, default=None),
mfa_token = dict(required=False, default=None)
role_arn=dict(required=True, default=None),
role_session_name=dict(required=True, default=None),
duration_seconds=dict(required=False, default=None, type='int'),
external_id=dict(required=False, default=None),
policy=dict(required=False, default=None),
mfa_serial_number=dict(required=False, default=None),
mfa_token=dict(required=False, default=None)
)
)

@ -108,6 +108,7 @@ def normalize_credentials(credentials):
'expiration': expiration
}
def get_session_token(connection, module):
duration_seconds = module.params.get('duration_seconds')
mfa_serial_number = module.params.get('mfa_serial_number')
@ -131,13 +132,14 @@ def get_session_token(connection, module):
credentials = normalize_credentials(response.get('Credentials', {}))
module.exit_json(changed=changed, sts_creds=credentials)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
duration_seconds = dict(required=False, default=None, type='int'),
mfa_serial_number = dict(required=False, default=None),
mfa_token = dict(required=False, default=None)
duration_seconds=dict(required=False, default=None, type='int'),
mfa_serial_number=dict(required=False, default=None),
mfa_token=dict(required=False, default=None)
)
)

@ -512,7 +512,7 @@ class AzureRMDeploymentManager(AzureRMModuleBase):
if self.wait_for_deployment_completion:
deployment_result = self.get_poller_result(result)
while deployment_result.properties is None or deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted',
'Succeeded']:
'Succeeded']:
time.sleep(self.wait_for_deployment_polling_period)
deployment_result = self.rm_client.deployments.get(self.resource_group_name, self.deployment_name)
except CloudError as exc:
@ -535,7 +535,7 @@ class AzureRMDeploymentManager(AzureRMModuleBase):
"""
try:
result = self.rm_client.resource_groups.delete(self.resource_group_name)
result.wait() # Blocking wait till the delete is finished
result.wait() # Blocking wait till the delete is finished
except CloudError as e:
if e.status_code == 404 or e.status_code == 204:
return
@ -569,7 +569,7 @@ class AzureRMDeploymentManager(AzureRMModuleBase):
nested_deployment)
except CloudError as exc:
self.fail("List nested deployment operations failed with status code: %s and message: %s" %
(exc.status_code, exc.message))
(exc.status_code, exc.message))
new_nested_operations = self._get_failed_nested_operations(nested_operations)
new_operations += new_nested_operations
return new_operations
@ -642,10 +642,10 @@ class AzureRMDeploymentManager(AzureRMModuleBase):
def _get_ip_dict(self, ip):
ip_dict = dict(name=ip.name,
id=ip.id,
public_ip=ip.ip_address,
public_ip_allocation_method=str(ip.public_ip_allocation_method)
)
id=ip.id,
public_ip=ip.ip_address,
public_ip_allocation_method=str(ip.public_ip_allocation_method)
)
if ip.dns_settings:
ip_dict['dns_settings'] = {
'domain_name_label': ip.dns_settings.domain_name_label,
@ -657,9 +657,9 @@ class AzureRMDeploymentManager(AzureRMModuleBase):
return [self.network_client.public_ip_addresses.get(public_ip_id.split('/')[4], public_ip_id.split('/')[-1])
for nic_obj in (self.network_client.network_interfaces.get(self.resource_group_name,
nic['dep'].resource_name) for nic in nics)
for public_ip_id in [ip_conf_instance.public_ip_address.id
for ip_conf_instance in nic_obj.ip_configurations
if ip_conf_instance.public_ip_address]]
for public_ip_id in [ip_conf_instance.public_ip_address.id
for ip_conf_instance in nic_obj.ip_configurations
if ip_conf_instance.public_ip_address]]
def main():

@ -154,6 +154,7 @@ def managed_disk_to_dict(managed_disk):
class AzureRMManagedDisk(AzureRMModuleBase):
"""Configuration class for an Azure RM Managed Disk resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(

@ -219,7 +219,7 @@ state:
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.network.models import NetworkInterface, NetworkInterfaceIPConfiguration, Subnet, \
PublicIPAddress, NetworkSecurityGroup
PublicIPAddress, NetworkSecurityGroup
except ImportError:
# This is handled in azure_rm_common
pass
@ -442,7 +442,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
if not pip and self.public_ip:
# create a default public_ip
pip = self.create_default_pip(self.resource_group, self.location, self.name,
self.public_ip_allocation_method)
self.public_ip_allocation_method)
nic = NetworkInterface(
location=self.location,
@ -475,8 +475,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
tags=results['tags'],
ip_configurations=[
NetworkInterfaceIPConfiguration(
private_ip_allocation_method=
results['ip_configuration']['private_ip_allocation_method']
private_ip_allocation_method=results['ip_configuration']['private_ip_allocation_method']
)
]
)
@ -496,7 +495,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
id=pip.id,
location=pip.location,
resource_guid=pip.resource_guid)
#name=pip.name,
# name=pip.name,
if results['network_security_group'].get('id'):
nsg = self.get_security_group(results['network_security_group']['name'])
@ -549,8 +548,8 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
subnet = self.network_client.subnets.get(self.resource_group, vnet_name, subnet_name)
except Exception as exc:
self.fail("Error: fetching subnet {0} in virtual network {1} - {2}".format(subnet_name,
vnet_name,
str(exc)))
vnet_name,
str(exc)))
return subnet
def get_security_group(self, name):

@ -193,7 +193,7 @@ class AzureRMPublicIPAddress(AzureRMModuleBase):
if self.domain_name != results['dns_settings'].get('domain_name_label'):
self.log('CHANGED: domain_name_label')
changed = True
results['dns_settings']['domain_name_label'] =self.domain_name
results['dns_settings']['domain_name_label'] = self.domain_name
if self.allocation_method != results['public_ip_allocation_method']:
self.log("CHANGED: allocation_method")

@ -183,7 +183,6 @@ class AzureRMPublicIPFacts(AzureRMModuleBase):
return results
def main():
AzureRMPublicIPFacts()

@ -155,7 +155,7 @@ try:
from azure.common import AzureMissingResourceHttpError
from azure.mgmt.storage.models import ProvisioningState, SkuName, SkuTier, Kind
from azure.mgmt.storage.models import StorageAccountUpdateParameters, CustomDomain, \
StorageAccountCreateParameters, Sku
StorageAccountCreateParameters, Sku
except ImportError:
# This is handled in azure_rm_common
pass
@ -226,7 +226,7 @@ class AzureRMStorageAccount(AzureRMModuleBase):
self.account_dict = self.get_account()
if self.state == 'present' and self.account_dict and \
self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE :
self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE:
self.fail("Error: storage account {0} has not completed provisioning. State is {1}. Expecting state "
"to be {2}.".format(self.name, self.account_dict['provisioning_state'], AZURE_SUCCESS_STATE))
@ -280,7 +280,7 @@ class AzureRMStorageAccount(AzureRMModuleBase):
resource_group=self.resource_group,
type=account_obj.type,
access_tier=(account_obj.access_tier.value
if account_obj.access_tier is not None else None),
if account_obj.access_tier is not None else None),
sku_tier=account_obj.sku.tier.value,
sku_name=account_obj.sku.name.value,
provisioning_state=account_obj.provisioning_state.value,

@ -132,7 +132,6 @@ except ImportError:
pass
def subnet_to_dict(subnet):
result = dict(
id=subnet.id,

@ -598,14 +598,14 @@ try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import parse_resource_id
from azure.mgmt.compute.models import NetworkInterfaceReference, \
VirtualMachine, HardwareProfile, \
StorageProfile, OSProfile, OSDisk, DataDisk, \
VirtualHardDisk, ManagedDiskParameters, \
ImageReference, NetworkProfile, LinuxConfiguration, \
SshConfiguration, SshPublicKey, VirtualMachineSizeTypes, \
DiskCreateOptionTypes, Plan, SubResource
VirtualMachine, HardwareProfile, \
StorageProfile, OSProfile, OSDisk, DataDisk, \
VirtualHardDisk, ManagedDiskParameters, \
ImageReference, NetworkProfile, LinuxConfiguration, \
SshConfiguration, SshPublicKey, VirtualMachineSizeTypes, \
DiskCreateOptionTypes, Plan, SubResource
from azure.mgmt.network.models import PublicIPAddress, NetworkSecurityGroup, NetworkInterface, \
NetworkInterfaceIPConfiguration, Subnet
NetworkInterfaceIPConfiguration, Subnet
from azure.mgmt.storage.models import StorageAccountCreateParameters, Sku
from azure.mgmt.storage.models import Kind, SkuTier, SkuName
except ImportError:
@ -659,7 +659,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
open_ports=dict(type='list'),
network_interface_names=dict(type='list', aliases=['network_interfaces']),
remove_on_absent=dict(type='list', default=['all']),
virtual_network_resource_group=dict(type = 'str'),
virtual_network_resource_group=dict(type='str'),
virtual_network_name=dict(type='str', aliases=['virtual_network']),
subnet_name=dict(type='str', aliases=['subnet']),
allocated=dict(type='bool', default=True),
@ -1297,7 +1297,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
nic_names = []
pip_names = []
if self.remove_on_absent.intersection(set(['all','virtual_storage'])):
if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
# store the attached vhd info so we can nuke it after the VM is gone
if(vm.storage_profile.os_disk.managed_disk):
self.log('Storing managed disk ID for deletion')
@ -1319,7 +1319,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
self.log("Managed disk IDs to delete: {0}".format(', '.join(managed_disk_ids)))
self.results['deleted_managed_disk_ids'] = managed_disk_ids
if self.remove_on_absent.intersection(set(['all','network_interfaces'])):
if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
# store the attached nic info so we can nuke them after the VM is gone
self.log('Storing NIC names for deletion.')
for interface in vm.network_profile.network_interfaces:
@ -1327,7 +1327,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
nic_names.append(id_dict['networkInterfaces'])
self.log('NIC names to delete {0}'.format(', '.join(nic_names)))
self.results['deleted_network_interfaces'] = nic_names
if self.remove_on_absent.intersection(set(['all','public_ips'])):
if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
# also store each nic's attached public IPs and delete after the NIC is gone
for name in nic_names:
nic = self.get_network_interface(name)
@ -1349,18 +1349,18 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
# TODO: parallelize nic, vhd, and public ip deletions with begin_deleting
# TODO: best-effort to keep deleting other linked resources if we encounter an error
if self.remove_on_absent.intersection(set(['all','virtual_storage'])):
if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
self.log('Deleting VHDs')
self.delete_vm_storage(vhd_uris)
self.log('Deleting managed disks')
self.delete_managed_disks(managed_disk_ids)
if self.remove_on_absent.intersection(set(['all','network_interfaces'])):
if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
self.log('Deleting network interfaces')
for name in nic_names:
self.delete_nic(name)
if self.remove_on_absent.intersection(set(['all','public_ips'])):
if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
self.log('Deleting public IPs')
for name in pip_names:
self.delete_pip(name)
@ -1461,6 +1461,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
return ImageReference(id=vm_image.id)
self.fail("Error could not find image with name {0}".format(name))
def get_availability_set(self, resource_group, name):
try:
return self.compute_client.availability_sets.get(resource_group, name)

@ -195,7 +195,7 @@ class AzureRMVirtualNetwork(AzureRMModuleBase):
self.dns_servers = None
self.purge_dns_servers = None
self.results=dict(
self.results = dict(
changed=False,
state=dict()
)
@ -327,7 +327,6 @@ class AzureRMVirtualNetwork(AzureRMModuleBase):
self.delete_virtual_network()
self.results['state']['status'] = 'Deleted'
return self.results
def create_or_update_vnet(self, vnet):

@ -178,6 +178,7 @@ class AzureRMNetworkInterfaceFacts(AzureRMModuleBase):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
AzureRMNetworkInterfaceFacts()

@ -464,7 +464,7 @@ class AnsibleCloudStackHost(AnsibleCloudStack):
# Set host allocationstate to be disabled/enabled
elif host['resourcestate'].lower() in list(self.allocation_states_for_update.keys()):
host['allocationstate'] = self.allocation_states_for_update[host['resourcestate'].lower()]
host['allocationstate'] = self.allocation_states_for_update[host['resourcestate'].lower()]
else:
host['allocationstate'] = host['resourcestate']

@ -951,7 +951,7 @@ class TaskParameters(DockerBaseClass):
Returns parameters used to create a HostConfig object
'''
host_config_params=dict(
host_config_params = dict(
port_bindings='published_ports',
publish_all_ports='publish_all_ports',
links='links',
@ -1163,7 +1163,7 @@ class TaskParameters(DockerBaseClass):
options = dict(
Type=self.log_driver,
Config = dict()
Config=dict()
)
if self.log_options is not None:
@ -1217,7 +1217,6 @@ class TaskParameters(DockerBaseClass):
return network_id
class Container(DockerBaseClass):
def __init__(self, container, parameters):
@ -1570,7 +1569,7 @@ class Container(DockerBaseClass):
CgroupPermissions=parts[2],
PathInContainer=parts[1],
PathOnHost=parts[0]
))
))
return expected_devices
def _get_expected_entrypoint(self):

@ -430,7 +430,7 @@ class ImageManager(DockerBaseClass):
if not self.check_mode:
status = None
try:
for line in self.client.push(repository, tag=tag, stream=True, decode=True):
for line in self.client.push(repository, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('errorDetail'):
raise Exception(line['errorDetail']['message'])

@ -217,7 +217,7 @@ class ImageManager(DockerBaseClass):
def main():
argument_spec = dict(
name=dict(type='list'),
)
)
client = AnsibleDockerClient(
argument_spec=argument_spec

@ -184,6 +184,7 @@ class TaskParameters(DockerBaseClass):
def container_names_in_network(network):
return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
class DockerNetworkManager(object):
def __init__(self, client):
@ -362,16 +363,16 @@ class DockerNetworkManager(object):
def main():
argument_spec = dict(
network_name = dict(type='str', required=True, aliases=['name']),
connected = dict(type='list', default=[], aliases=['containers']),
state = dict(type='str', default='present', choices=['present', 'absent']),
driver = dict(type='str', default='bridge'),
driver_options = dict(type='dict', default={}),
force = dict(type='bool', default=False),
appends = dict(type='bool', default=False, aliases=['incremental']),
ipam_driver = dict(type='str', default=None),
ipam_options = dict(type='dict', default={}),
debug = dict(type='bool', default=False)
network_name=dict(type='str', required=True, aliases=['name']),
connected=dict(type='list', default=[], aliases=['containers']),
state=dict(type='str', default='present', choices=['present', 'absent']),
driver=dict(type='str', default='bridge'),
driver_options=dict(type='dict', default={}),
force=dict(type='bool', default=False),
appends=dict(type='bool', default=False, aliases=['incremental']),
ipam_driver=dict(type='str', default=None),
ipam_options=dict(type='dict', default={}),
debug=dict(type='bool', default=False)
)
client = AnsibleDockerClient(

@ -179,27 +179,26 @@ def grant_check(module, gs, obj):
try:
acp = obj.get_acl()
if module.params.get('permission') == 'public-read':
grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
if not grant:
obj.set_acl('public-read')
module.exit_json(changed=True, result="The objects permission as been set to public-read")
if module.params.get('permission') == 'authenticated-read':
grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
if not grant:
obj.set_acl('authenticated-read')
module.exit_json(changed=True, result="The objects permission as been set to authenticated-read")
except gs.provider.storage_response_error as e:
module.fail_json(msg= str(e))
module.fail_json(msg=str(e))
return True
def key_check(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
key_check = bucket.get_key(obj)
except gs.provider.storage_response_error as e:
module.fail_json(msg= str(e))
module.fail_json(msg=str(e))
if key_check:
grant_check(module, gs, key_check)
return True
@ -213,7 +212,7 @@ def keysum(module, gs, bucket, obj):
if not key_check:
return None
md5_remote = key_check.etag[1:-1]
etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
if etag_multipart is True:
module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
return md5_remote
@ -223,7 +222,7 @@ def bucket_check(module, gs, bucket):
try:
result = gs.lookup(bucket)
except gs.provider.storage_response_error as e:
module.fail_json(msg= str(e))
module.fail_json(msg=str(e))
if result:
grant_check(module, gs, result)
return True
@ -237,7 +236,7 @@ def create_bucket(module, gs, bucket):
bucket.set_acl(module.params.get('permission'))
bucket.configure_versioning(module.params.get('versioning'))
except gs.provider.storage_response_error as e:
module.fail_json(msg= str(e))
module.fail_json(msg=str(e))
if bucket:
return True
@ -251,7 +250,7 @@ def delete_bucket(module, gs, bucket):
bucket.delete()
return True
except gs.provider.storage_response_error as e:
module.fail_json(msg= str(e))
module.fail_json(msg=str(e))
def delete_key(module, gs, bucket, obj):
@ -260,7 +259,7 @@ def delete_key(module, gs, bucket, obj):
bucket.delete_key(obj)
module.exit_json(msg="Object deleted from bucket ", changed=True)
except gs.provider.storage_response_error as e:
module.fail_json(msg= str(e))
module.fail_json(msg=str(e))
def create_dirkey(module, gs, bucket, obj):
@ -270,7 +269,7 @@ def create_dirkey(module, gs, bucket, obj):
key.set_contents_from_string('')
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
except gs.provider.storage_response_error as e:
module.fail_json(msg= str(e))
module.fail_json(msg=str(e))
def path_check(path):
@ -308,7 +307,7 @@ def upload_gsfile(module, gs, bucket, obj, src, expiry):
url = key.generate_url(expiry)
module.exit_json(msg="PUT operation complete", url=url, changed=True)
except gs.provider.storage_copy_error as e:
module.fail_json(msg= str(e))
module.fail_json(msg=str(e))
def download_gsfile(module, gs, bucket, obj, dest):
@ -318,7 +317,7 @@ def download_gsfile(module, gs, bucket, obj, dest):
key.get_contents_to_filename(dest)
module.exit_json(msg="GET operation complete", changed=True)
except gs.provider.storage_copy_error as e:
module.fail_json(msg= str(e))
module.fail_json(msg=str(e))
def download_gsstr(module, gs, bucket, obj):
@ -328,7 +327,7 @@ def download_gsstr(module, gs, bucket, obj):
contents = key.get_contents_as_string()
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
except gs.provider.storage_copy_error as e:
module.fail_json(msg= str(e))
module.fail_json(msg=str(e))
def get_download_url(module, gs, bucket, obj, expiry):
@ -338,7 +337,7 @@ def get_download_url(module, gs, bucket, obj, expiry):
url = key.generate_url(expiry)
module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True)
except gs.provider.storage_response_error as e:
module.fail_json(msg= str(e))
module.fail_json(msg=str(e))
def handle_get(module, gs, bucket, obj, overwrite, dest):
@ -355,7 +354,7 @@ def handle_get(module, gs, bucket, obj, overwrite, dest):
def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
# Lets check to see if bucket exists to get ground truth.
bucket_rc = bucket_check(module, gs, bucket)
key_rc = key_check(module, gs, bucket, obj)
key_rc = key_check(module, gs, bucket, obj)
# Lets check key state. Does it exist and if it does, compute the etag md5sum.
if bucket_rc and key_rc:
@ -380,7 +379,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
def handle_delete(module, gs, bucket, obj):
if bucket and not obj:
if bucket_check(module, gs, bucket):
module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=delete_bucket(module, gs, bucket))
module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=delete_bucket(module, gs, bucket))
else:
module.exit_json(msg="Bucket does not exist.", changed=False)
if bucket and obj:
@ -409,7 +408,7 @@ def handle_create(module, gs, bucket, obj):
if bucket_check(module, gs, bucket):
if key_check(module, gs, bucket, dirobj):
module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
else:
create_dirkey(module, gs, bucket, dirobj)
else:
@ -419,35 +418,35 @@ def handle_create(module, gs, bucket, obj):
def main():
module = AnsibleModule(
argument_spec = dict(
bucket = dict(required=True),
object = dict(default=None, type='path'),
src = dict(default=None),
dest = dict(default=None, type='path'),
expiration = dict(type='int', default=600, aliases=['expiry']),
mode = dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
permission = dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
headers = dict(type='dict', default={}),
gs_secret_key = dict(no_log=True, required=True),
gs_access_key = dict(required=True),
overwrite = dict(default=True, type='bool', aliases=['force']),
region = dict(default='US', type='str'),
versioning = dict(default='no', type='bool')
argument_spec=dict(
bucket=dict(required=True),
object=dict(default=None, type='path'),
src=dict(default=None),
dest=dict(default=None, type='path'),
expiration=dict(type='int', default=600, aliases=['expiry']),
mode=dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
permission=dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
headers=dict(type='dict', default={}),
gs_secret_key=dict(no_log=True, required=True),
gs_access_key=dict(required=True),
overwrite=dict(default=True, type='bool', aliases=['force']),
region=dict(default='US', type='str'),
versioning=dict(default='no', type='bool')
),
)
if not HAS_BOTO:
module.fail_json(msg='boto 2.9+ required for this module')
bucket = module.params.get('bucket')
obj = module.params.get('object')
src = module.params.get('src')
dest = module.params.get('dest')
mode = module.params.get('mode')
expiry = module.params.get('expiration')
bucket = module.params.get('bucket')
obj = module.params.get('object')
src = module.params.get('src')
dest = module.params.get('dest')
mode = module.params.get('mode')
expiry = module.params.get('expiration')
gs_secret_key = module.params.get('gs_secret_key')
gs_access_key = module.params.get('gs_access_key')
overwrite = module.params.get('overwrite')
overwrite = module.params.get('overwrite')
if mode == 'put':
if not src or not object:
@ -459,7 +458,7 @@ def main():
try:
gs = boto.connect_gs(gs_access_key, gs_secret_key)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
module.fail_json(msg=str(e))
if mode == 'get':
if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):

@ -348,7 +348,7 @@ PROVIDER = Provider.GOOGLE
# I'm hard-coding the supported record types here, because they (hopefully!)
# shouldn't change much, and it allows me to use it as a "choices" parameter
# in an AnsibleModule argument_spec.
SUPPORTED_RECORD_TYPES = [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
SUPPORTED_RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR']
################################################################################
@ -378,8 +378,8 @@ def create_record(module, gcdns, zone, record):
# The record doesn't match, so we need to check if we can overwrite it.
if not overwrite:
module.fail_json(
msg = 'cannot overwrite existing record, overwrite protection enabled',
changed = False
msg='cannot overwrite existing record, overwrite protection enabled',
changed=False
)
# The record either doesn't exist, or it exists and we can overwrite it.
@ -393,9 +393,9 @@ def create_record(module, gcdns, zone, record):
# not when combined (e.g., an 'A' record with "www.example.com"
# as its value).
module.fail_json(
msg = 'value is invalid for the given type: ' +
"%s, got value: %s" % (record_type, record_data),
changed = False
msg='value is invalid for the given type: ' +
"%s, got value: %s" % (record_type, record_data),
changed=False
)
elif error.code == 'cnameResourceRecordSetConflict':
@ -403,8 +403,8 @@ def create_record(module, gcdns, zone, record):
# already have another type of resource record with the name
# domain name.
module.fail_json(
msg = "non-CNAME resource record already exists: %s" % record_name,
changed = False
msg="non-CNAME resource record already exists: %s" % record_name,
changed=False
)
else:
@ -428,8 +428,8 @@ def create_record(module, gcdns, zone, record):
try:
gcdns.create_record(record.name, record.zone, record.type, record.data)
module.fail_json(
msg = 'error updating record, the original record was restored',
changed = False
msg='error updating record, the original record was restored',
changed=False
)
except LibcloudError:
# We deleted the old record, couldn't create the new record, and
@ -437,12 +437,12 @@ def create_record(module, gcdns, zone, record):
# record to the failure output so the user can resore it if
# necessary.
module.fail_json(
msg = 'error updating record, and could not restore original record, ' +
"original name: %s " % record.name +
"original zone: %s " % record.zone +
"original type: %s " % record.type +
"original data: %s" % record.data,
changed = True)
msg='error updating record, and could not restore original record, ' +
"original name: %s " % record.name +
"original zone: %s " % record.zone +
"original type: %s " % record.type +
"original data: %s" % record.data,
changed=True)
return True
@ -450,8 +450,8 @@ def create_record(module, gcdns, zone, record):
def remove_record(module, gcdns, record):
"""Remove a resource record."""
overwrite = module.boolean(module.params['overwrite'])
ttl = module.params['ttl']
overwrite = module.boolean(module.params['overwrite'])
ttl = module.params['ttl']
record_data = module.params['record_data']
# If there is no record, we're obviously done.
@ -463,10 +463,10 @@ def remove_record(module, gcdns, record):
if not overwrite:
if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
module.fail_json(
msg = 'cannot delete due to non-matching ttl or record_data: ' +
"ttl: %d, record_data: %s " % (ttl, record_data) +
"original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
changed = False
msg='cannot delete due to non-matching ttl or record_data: ' +
"ttl: %d, record_data: %s " % (ttl, record_data) +
"original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
changed=False
)
# If we got to this point, we're okay to delete the record.
@ -529,30 +529,30 @@ def _records_match(old_ttl, old_record_data, new_ttl, new_record_data):
def _sanity_check(module):
"""Run sanity checks that don't depend on info from the zone/record."""
overwrite = module.params['overwrite']
overwrite = module.params['overwrite']
record_name = module.params['record']
record_type = module.params['type']
state = module.params['state']
ttl = module.params['ttl']
state = module.params['state']
ttl = module.params['ttl']
record_data = module.params['record_data']
# Apache libcloud needs to be installed and at least the minimum version.
if not HAS_LIBCLOUD:
module.fail_json(
msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
changed = False
msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
changed=False
)
elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
module.fail_json(
msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
changed = False
msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
changed=False
)
# A negative TTL is not permitted (how would they even work?!).
if ttl < 0:
module.fail_json(
msg = 'TTL cannot be less than zero, got: %d' % ttl,
changed = False
msg='TTL cannot be less than zero, got: %d' % ttl,
changed=False
)
# Deleting SOA records is not permitted.
@ -572,8 +572,8 @@ def _sanity_check(module):
socket.inet_aton(value)
except socket.error:
module.fail_json(
msg = 'invalid A record value, got: %s' % value,
changed = False
msg='invalid A record value, got: %s' % value,
changed=False
)
# AAAA records must contain valid IPv6 addresses.
@ -583,23 +583,23 @@ def _sanity_check(module):
socket.inet_pton(socket.AF_INET6, value)
except socket.error:
module.fail_json(
msg = 'invalid AAAA record value, got: %s' % value,
changed = False
msg='invalid AAAA record value, got: %s' % value,
changed=False
)
# CNAME and SOA records can't have multiple values.
if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
module.fail_json(
msg = 'CNAME or SOA records cannot have more than one value, ' +
"got: %s" % record_data,
changed = False
msg='CNAME or SOA records cannot have more than one value, ' +
"got: %s" % record_data,
changed=False
)
# Google Cloud DNS does not support wildcard NS records.
if record_type == 'NS' and record_name[0] == '*':
module.fail_json(
msg = "wildcard NS records not allowed, got: %s" % record_name,
changed = False
msg="wildcard NS records not allowed, got: %s" % record_name,
changed=False
)
# Values for txt records must begin and end with a double quote.
@ -607,32 +607,32 @@ def _sanity_check(module):
for value in record_data:
if value[0] != '"' and value[-1] != '"':
module.fail_json(
msg = 'TXT record_data must be enclosed in double quotes, ' +
'got: %s' % value,
changed = False
msg='TXT record_data must be enclosed in double quotes, ' +
'got: %s' % value,
changed=False
)
def _additional_sanity_checks(module, zone):
"""Run input sanity checks that depend on info from the zone/record."""
overwrite = module.params['overwrite']
overwrite = module.params['overwrite']
record_name = module.params['record']
record_type = module.params['type']
state = module.params['state']
state = module.params['state']
# CNAME records are not allowed to have the same name as the root domain.
if record_type == 'CNAME' and record_name == zone.domain:
module.fail_json(
msg = 'CNAME records cannot match the zone name',
changed = False
msg='CNAME records cannot match the zone name',
changed=False
)
# The root domain must always have an NS record.
if record_type == 'NS' and record_name == zone.domain and state == 'absent':
module.fail_json(
msg = 'cannot delete root NS records',
changed = False
msg='cannot delete root NS records',
changed=False
)
# Updating NS records with the name as the root domain is not allowed
@ -640,16 +640,16 @@ def _additional_sanity_checks(module, zone):
# records cannot be removed.
if record_type == 'NS' and record_name == zone.domain and overwrite:
module.fail_json(
msg = 'cannot update existing root NS records',
changed = False
msg='cannot update existing root NS records',
changed=False
)
# SOA records with names that don't match the root domain are not permitted
# (and wouldn't make sense anyway).
if record_type == 'SOA' and record_name != zone.domain:
module.fail_json(
msg = 'non-root SOA records are not permitted, got: %s' % record_name,
changed = False
msg='non-root SOA records are not permitted, got: %s' % record_name,
changed=False
)
@ -661,46 +661,46 @@ def main():
"""Main function"""
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent'], type='str'),
record = dict(required=True, aliases=['name'], type='str'),
zone = dict(type='str'),
zone_id = dict(type='str'),
type = dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
record_data = dict(aliases=['value'], type='list'),
ttl = dict(default=300, type='int'),
overwrite = dict(default=False, type='bool'),
service_account_email = dict(type='str'),
pem_file = dict(type='path'),
credentials_file = dict(type='path'),
project_id = dict(type='str')
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
record=dict(required=True, aliases=['name'], type='str'),
zone=dict(type='str'),
zone_id=dict(type='str'),
type=dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
record_data=dict(aliases=['value'], type='list'),
ttl=dict(default=300, type='int'),
overwrite=dict(default=False, type='bool'),
service_account_email=dict(type='str'),
pem_file=dict(type='path'),
credentials_file=dict(type='path'),
project_id=dict(type='str')
),
required_if = [
required_if=[
('state', 'present', ['record_data']),
('overwrite', False, ['record_data'])
],
required_one_of = [['zone', 'zone_id']],
supports_check_mode = True
required_one_of=[['zone', 'zone_id']],
supports_check_mode=True
)
_sanity_check(module)
record_name = module.params['record']
record_type = module.params['type']
state = module.params['state']
ttl = module.params['ttl']
zone_name = module.params['zone']
zone_id = module.params['zone_id']
state = module.params['state']
ttl = module.params['ttl']
zone_name = module.params['zone']
zone_id = module.params['zone_id']
json_output = dict(
state = state,
record = record_name,
zone = zone_name,
zone_id = zone_id,
type = record_type,
record_data = module.params['record_data'],
ttl = ttl,
overwrite = module.boolean(module.params['overwrite'])
state=state,
record=record_name,
zone=zone_name,
zone_id=zone_id,
type=record_type,
record_data=module.params['record_data'],
ttl=ttl,
overwrite=module.boolean(module.params['overwrite'])
)
# Google Cloud DNS wants the trailing dot on all DNS names.
@ -718,13 +718,13 @@ def main():
zone = _get_zone(gcdns, zone_name, zone_id)
if zone is None and zone_name is not None:
module.fail_json(
msg = 'zone name was not found: %s' % zone_name,
changed = False
msg='zone name was not found: %s' % zone_name,
changed=False
)
elif zone is None and zone_id is not None:
module.fail_json(
msg = 'zone id was not found: %s' % zone_id,
changed = False
msg='zone id was not found: %s' % zone_id,
changed=False
)
# Populate the returns with the actual zone information.
@ -738,8 +738,8 @@ def main():
except InvalidRequestError:
# We gave Google Cloud DNS an invalid DNS record name.
module.fail_json(
msg = 'record name is invalid: %s' % record_name,
changed = False
msg='record name is invalid: %s' % record_name,
changed=False
)
_additional_sanity_checks(module, zone)
@ -752,20 +752,20 @@ def main():
diff['before_header'] = '<absent>'
else:
diff['before'] = dict(
record = record.data['name'],
type = record.data['type'],
record_data = record.data['rrdatas'],
ttl = record.data['ttl']
record=record.data['name'],
type=record.data['type'],
record_data=record.data['rrdatas'],
ttl=record.data['ttl']
)
diff['before_header'] = "%s:%s" % (record_type, record_name)
# Create, remove, or modify the record.
if state == 'present':
diff['after'] = dict(
record = record_name,
type = record_type,
record_data = module.params['record_data'],
ttl = ttl
record=record_name,
type=record_type,
record_data=module.params['record_data'],
ttl=ttl
)
diff['after_header'] = "%s:%s" % (record_type, record_name)

@ -145,18 +145,19 @@ MINIMUM_LIBCLOUD_VERSION = '0.19.0'
PROVIDER = Provider.GOOGLE
# The URL used to verify ownership of a zone in Google Cloud DNS.
ZONE_VERIFICATION_URL= 'https://www.google.com/webmasters/verification/'
ZONE_VERIFICATION_URL = 'https://www.google.com/webmasters/verification/'
################################################################################
# Functions
################################################################################
def create_zone(module, gcdns, zone):
"""Creates a new Google Cloud DNS zone."""
description = module.params['description']
extra = dict(description = description)
zone_name = module.params['zone']
extra = dict(description=description)
zone_name = module.params['zone']
# Google Cloud DNS wants the trailing dot on the domain name.
if zone_name[-1] != '.':
@ -184,8 +185,8 @@ def create_zone(module, gcdns, zone):
# The zone name or a parameter might be completely invalid. This is
# typically caused by an illegal DNS name (e.g. foo..com).
module.fail_json(
msg = "zone name is not a valid DNS name: %s" % zone_name,
changed = False
msg="zone name is not a valid DNS name: %s" % zone_name,
changed=False
)
elif error.code == 'managedZoneDnsNameNotAvailable':
@ -193,8 +194,8 @@ def create_zone(module, gcdns, zone):
# names, such as TLDs, ccTLDs, or special domain names such as
# example.com.
module.fail_json(
msg = "zone name is reserved or already in use: %s" % zone_name,
changed = False
msg="zone name is reserved or already in use: %s" % zone_name,
changed=False
)
elif error.code == 'verifyManagedZoneDnsNameOwnership':
@ -202,8 +203,8 @@ def create_zone(module, gcdns, zone):
# it. This occurs when a user attempts to create a zone which shares
# a domain name with a zone hosted elsewhere in Google Cloud DNS.
module.fail_json(
msg = "ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
changed = False
msg="ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
changed=False
)
else:
@ -226,8 +227,8 @@ def remove_zone(module, gcdns, zone):
# refuse to remove the zone.
if len(zone.list_records()) > 2:
module.fail_json(
msg = "zone is not empty and cannot be removed: %s" % zone.domain,
changed = False
msg="zone is not empty and cannot be removed: %s" % zone.domain,
changed=False
)
try:
@ -246,8 +247,8 @@ def remove_zone(module, gcdns, zone):
# the milliseconds between the check and the removal command,
# records were added to the zone.
module.fail_json(
msg = "zone is not empty and cannot be removed: %s" % zone.domain,
changed = False
msg="zone is not empty and cannot be removed: %s" % zone.domain,
changed=False
)
else:
@ -273,6 +274,7 @@ def _get_zone(gcdns, zone_name):
return found_zone
def _sanity_check(module):
"""Run module sanity checks."""
@ -281,40 +283,41 @@ def _sanity_check(module):
# Apache libcloud needs to be installed and at least the minimum version.
if not HAS_LIBCLOUD:
module.fail_json(
msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
changed = False
msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
changed=False
)
elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
module.fail_json(
msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
changed = False
msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
changed=False
)
# Google Cloud DNS does not support the creation of TLDs.
if '.' not in zone_name or len([label for label in zone_name.split('.') if label]) == 1:
module.fail_json(
msg = 'cannot create top-level domain: %s' % zone_name,
changed = False
msg='cannot create top-level domain: %s' % zone_name,
changed=False
)
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent'], type='str'),
zone = dict(required=True, aliases=['name'], type='str'),
description = dict(default='', type='str'),
service_account_email = dict(type='str'),
pem_file = dict(type='path'),
credentials_file = dict(type='path'),
project_id = dict(type='str')
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
zone=dict(required=True, aliases=['name'], type='str'),
description=dict(default='', type='str'),
service_account_email=dict(type='str'),
pem_file=dict(type='path'),
credentials_file=dict(type='path'),
project_id=dict(type='str')
),
supports_check_mode = True
supports_check_mode=True
)
_sanity_check(module)
@ -327,9 +330,9 @@ def main():
zone_name = zone_name + '.'
json_output = dict(
state = state,
zone = zone_name,
description = module.params['description']
state=state,
zone=zone_name,
description=module.params['description']
)
# Build a connection object that was can use to connect with Google
@ -347,16 +350,16 @@ def main():
diff['before_header'] = '<absent>'
else:
diff['before'] = dict(
zone = zone.domain,
description = zone.extra['description']
zone=zone.domain,
description=zone.extra['description']
)
diff['before_header'] = zone_name
# Create or remove the zone.
if state == 'present':
diff['after'] = dict(
zone = zone_name,
description = module.params['description']
zone=zone_name,
description=module.params['description']
)
diff['after_header'] = zone_name

@ -377,7 +377,7 @@ EXAMPLES = """
- test-container-new-archive-destroyed-clone
"""
RETURN="""
RETURN = """
lxc_container:
description: container information
returned: success
@ -579,7 +579,7 @@ def create_script(command):
f.close()
# Ensure the script is executable.
os.chmod(script_file, int('0700',8))
os.chmod(script_file, int('0700', 8))
# Output log file.
stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
@ -915,7 +915,7 @@ class LxcContainerManagement(object):
'ips': self.container.get_ips(),
'state': self._get_state(),
'init_pid': int(self.container.init_pid),
'name' : self.container_name,
'name': self.container_name,
}
def _unfreeze(self):
@ -1365,7 +1365,7 @@ class LxcContainerManagement(object):
:type source_dir: ``str``
"""
old_umask = os.umask(int('0077',8))
old_umask = os.umask(int('0077', 8))
archive_path = self.module.params.get('archive_path')
if not os.path.isdir(archive_path):
@ -1750,7 +1750,7 @@ def main():
)
),
supports_check_mode=False,
required_if = ([
required_if=([
('archive', True, ['archive_path'])
]),
)

@ -216,7 +216,7 @@ EXAMPLES = '''
flat: true
'''
RETURN='''
RETURN = '''
addresses:
description: Mapping from the network device name to a list of IPv4 addresses in the container
returned: when state is started or restarted
@ -328,7 +328,7 @@ class LXDContainerManagement(object):
return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
def _change_state(self, action, force_stop=False):
body_json={'action': action, 'timeout': self.timeout}
body_json = {'action': action, 'timeout': self.timeout}
if force_stop:
body_json['force'] = True
return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
@ -527,6 +527,7 @@ class LXDContainerManagement(object):
fail_params['logs'] = e.kwargs['logs']
self.module.fail_json(**fail_params)
def main():
"""Ansible Main module."""
@ -585,7 +586,7 @@ def main():
type='str',
default='{}/.config/lxc/client.crt'.format(os.environ['HOME'])
),
trust_password=dict( type='str', no_log=True )
trust_password=dict(type='str', no_log=True)
),
supports_check_mode=False,
)

@ -347,6 +347,7 @@ failed = False
class RHEVConn(object):
'Connection to RHEV-M'
def __init__(self, module):
self.module = module
@ -726,11 +727,11 @@ class RHEVConn(object):
bond.append(ifacelist[slave])
try:
tmpiface = params.Bonding(
slaves = params.Slaves(host_nic = bond),
options = params.Options(
option = [
params.Option(name = 'miimon', value = '100'),
params.Option(name = 'mode', value = '4')
slaves=params.Slaves(host_nic=bond),
options=params.Options(
option=[
params.Option(name='miimon', value='100'),
params.Option(name='mode', value='4')
]
)
)
@ -741,16 +742,16 @@ class RHEVConn(object):
return False
try:
tmpnetwork = params.HostNIC(
network = params.Network(name = iface['network']),
name = iface['name'],
boot_protocol = iface['boot_protocol'],
ip = params.IP(
address = iface['ip'],
netmask = iface['netmask'],
gateway = iface['gateway']
network=params.Network(name=iface['network']),
name=iface['name'],
boot_protocol=iface['boot_protocol'],
ip=params.IP(
address=iface['ip'],
netmask=iface['netmask'],
gateway=iface['gateway']
),
override_configuration = True,
bonding = tmpiface)
override_configuration=True,
bonding=tmpiface)
networklist.append(tmpnetwork)
setMsg('Applying network ' + iface['name'])
except Exception as e:
@ -760,13 +761,13 @@ class RHEVConn(object):
return False
else:
tmpnetwork = params.HostNIC(
network = params.Network(name = iface['network']),
name = iface['name'],
boot_protocol = iface['boot_protocol'],
ip = params.IP(
address = iface['ip'],
netmask = iface['netmask'],
gateway = iface['gateway']
network=params.Network(name=iface['network']),
name=iface['name'],
boot_protocol=iface['boot_protocol'],
ip=params.IP(
address=iface['ip'],
netmask=iface['netmask'],
gateway=iface['gateway']
))
networklist.append(tmpnetwork)
setMsg('Applying network ' + iface['name'])
@ -828,8 +829,8 @@ class RHEVConn(object):
try:
HOST.nics.setupnetworks(params.Action(
force=True,
check_connectivity = False,
host_nics = params.HostNics(host_nic = networklist)
check_connectivity=False,
host_nics=params.HostNics(host_nic=networklist)
))
setMsg('nics are set')
except Exception as e:
@ -1008,24 +1009,24 @@ class RHEV(object):
VM = self.conn.get_VM(name)
if VM:
vminfo = dict()
vminfo['uuid'] = VM.id
vminfo['name'] = VM.name
vminfo['status'] = VM.status.state
vminfo['cpu_cores'] = VM.cpu.topology.cores
vminfo['uuid'] = VM.id
vminfo['name'] = VM.name
vminfo['status'] = VM.status.state
vminfo['cpu_cores'] = VM.cpu.topology.cores
vminfo['cpu_sockets'] = VM.cpu.topology.sockets
vminfo['cpu_shares'] = VM.cpu_shares
vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024)
vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024)
vminfo['os'] = VM.get_os().type_
vminfo['del_prot'] = VM.delete_protected
vminfo['cpu_shares'] = VM.cpu_shares
vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024)
vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024)
vminfo['os'] = VM.get_os().type_
vminfo['del_prot'] = VM.delete_protected
try:
vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
except Exception:
vminfo['host'] = None
vminfo['boot_order'] = []
vminfo['host'] = None
vminfo['boot_order'] = []
for boot_dev in VM.os.get_boot():
vminfo['boot_order'].append(str(boot_dev.dev))
vminfo['disks'] = []
vminfo['disks'] = []
for DISK in VM.disks.list():
disk = dict()
disk['name'] = DISK.name
@ -1033,7 +1034,7 @@ class RHEV(object):
disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
disk['interface'] = DISK.interface
vminfo['disks'].append(disk)
vminfo['ifaces'] = []
vminfo['ifaces'] = []
for NIC in VM.nics.list():
iface = dict()
iface['name'] = str(NIC.name)
@ -1083,17 +1084,17 @@ class RHEV(object):
bootselect = True
for disk in disks:
diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
disksize = disk.get('size', 1)
diskdomain = disk.get('domain', None)
diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
disksize = disk.get('size', 1)
diskdomain = disk.get('domain', None)
if diskdomain is None:
setMsg("`domain` is a required disk key.")
setFailed()
return False
diskinterface = disk.get('interface', 'virtio')
diskformat = disk.get('format', 'raw')
diskinterface = disk.get('interface', 'virtio')
diskformat = disk.get('format', 'raw')
diskallocationtype = disk.get('thin', False)
diskboot = disk.get('bootable', False)
diskboot = disk.get('bootable', False)
if bootselect is False and counter == 0:
diskboot = True
@ -1175,7 +1176,7 @@ class RHEV(object):
def setBootOrder(self, vmname, boot_order):
self.__get_conn()
VM = self.conn.get_VM(vmname)
bootorder = []
bootorder = []
for boot_dev in VM.os.get_boot():
bootorder.append(str(boot_dev.dev))
@ -1469,31 +1470,31 @@ def core(module):
def main():
global module
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']),
user = dict(default="admin@internal"),
password = dict(required=True, no_log=True),
server = dict(default="127.0.0.1"),
port = dict(default="443"),
insecure_api = dict(default=False, type='bool'),
name = dict(),
image = dict(default=False),
datacenter = dict(default="Default"),
type = dict(default="server", choices=['server', 'desktop', 'host']),
cluster = dict(default=''),
vmhost = dict(default=False),
vmcpu = dict(default="2"),
vmmem = dict(default="1"),
disks = dict(),
osver = dict(default="rhel_6x64"),
ifaces = dict(aliases=['nics', 'interfaces']),
timeout = dict(default=False),
mempol = dict(default="1"),
vm_ha = dict(default=True),
cpu_share = dict(default="0"),
boot_order = dict(default=["network", "hd"]),
del_prot = dict(default=True, type="bool"),
cd_drive = dict(default=False)
argument_spec=dict(
state=dict(default='present', choices=['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']),
user=dict(default="admin@internal"),
password=dict(required=True, no_log=True),
server=dict(default="127.0.0.1"),
port=dict(default="443"),
insecure_api=dict(default=False, type='bool'),
name=dict(),
image=dict(default=False),
datacenter=dict(default="Default"),
type=dict(default="server", choices=['server', 'desktop', 'host']),
cluster=dict(default=''),
vmhost=dict(default=False),
vmcpu=dict(default="2"),
vmmem=dict(default="1"),
disks=dict(),
osver=dict(default="rhel_6x64"),
ifaces=dict(aliases=['nics', 'interfaces']),
timeout=dict(default=False),
mempol=dict(default="1"),
vm_ha=dict(default=True),
cpu_share=dict(default="0"),
boot_order=dict(default=["network", "hd"]),
del_prot=dict(default=True, type="bool"),
cd_drive=dict(default=False)
),
)

@ -154,13 +154,13 @@ def get_service_name(module, stage):
def main():
module = AnsibleModule(
argument_spec=dict(
service_path = dict(required=True, type='path'),
state = dict(default='present', choices=['present', 'absent'], required=False),
functions = dict(type='list', required=False),
region = dict(default='', required=False),
stage = dict(default='', required=False),
deploy = dict(default=True, type='bool', required=False),
serverless_bin_path = dict(required=False, type='path')
service_path=dict(required=True, type='path'),
state=dict(default='present', choices=['present', 'absent'], required=False),
functions=dict(type='list', required=False),
region=dict(default='', required=False),
stage=dict(default='', required=False),
deploy=dict(default=True, type='bool', required=False),
serverless_bin_path=dict(required=False, type='path')
),
)
@ -198,13 +198,13 @@ def main():
if rc != 0:
if state == 'absent' and "-{}' does not exist".format(stage) in out:
module.exit_json(changed=False, state='absent', command=command,
out=out, service_name=get_service_name(module, stage))
out=out, service_name=get_service_name(module, stage))
module.fail_json(msg="Failure when executing Serverless command. Exited {}.\nstdout: {}\nstderr: {}".format(rc, out, err))
# gather some facts about the deployment
module.exit_json(changed=True, state='present', out=out, command=command,
service_name=get_service_name(module, stage))
service_name=get_service_name(module, stage))
if __name__ == '__main__':

@ -151,31 +151,32 @@ from ansible.module_utils._text import to_native
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE=2
VIRT_UNAVAILABLE = 2
ALL_COMMANDS = []
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop',
'undefine', 'destroy', 'get_xml', 'define',
'modify' ]
HOST_COMMANDS = [ 'list_nets', 'facts', 'info' ]
'modify']
HOST_COMMANDS = ['list_nets', 'facts', 'info']
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
ENTRY_STATE_ACTIVE_MAP = {
0 : "inactive",
1 : "active"
0: "inactive",
1: "active"
}
ENTRY_STATE_AUTOSTART_MAP = {
0 : "no",
1 : "yes"
0: "no",
1: "yes"
}
ENTRY_STATE_PERSISTENT_MAP = {
0 : "no",
1 : "yes"
0: "no",
1: "yes"
}
class EntryNotFound(Exception):
pass
@ -245,9 +246,9 @@ class LibvirtConnection(object):
if host is None:
# add the host
if not self.module.check_mode:
res = network.update (libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST,
libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
-1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST,
libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
-1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
else:
# pretend there was a change
res = 0
@ -259,9 +260,9 @@ class LibvirtConnection(object):
return False
else:
if not self.module.check_mode:
res = network.update (libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY,
libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
-1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY,
libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
-1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
else:
# pretend there was a change
res = 0
@ -286,18 +287,18 @@ class LibvirtConnection(object):
def get_status2(self, entry):
state = entry.isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
def get_status(self, entryid):
if not self.module.check_mode:
state = self.find_entry(entryid).isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
else:
try:
state = self.find_entry(entryid).isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
except:
return ENTRY_STATE_ACTIVE_MAP.get("inactive","unknown")
return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown")
def get_uuid(self, entryid):
return self.find_entry(entryid).UUIDString()
@ -331,7 +332,7 @@ class LibvirtConnection(object):
def get_autostart(self, entryid):
state = self.find_entry(entryid).autostart()
return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown")
def get_autostart2(self, entryid):
if not self.module.check_mode:
@ -358,7 +359,7 @@ class LibvirtConnection(object):
def get_persistent(self, entryid):
state = self.find_entry(entryid).isPersistent()
return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown")
def get_dhcp_leases(self, entryid):
network = self.find_entry(entryid)
@ -398,7 +399,7 @@ class VirtNetwork(object):
results = []
for entry in self.list_nets():
state_blurb = self.conn.get_status(entry)
results.append("%s %s" % (entry,state_blurb))
results.append("%s %s" % (entry, state_blurb))
return results
def autostart(self, entryid):
@ -481,11 +482,11 @@ class VirtNetwork(object):
def core(module):
state = module.params.get('state', None)
name = module.params.get('name', None)
command = module.params.get('command', None)
uri = module.params.get('uri', None)
xml = module.params.get('xml', None)
state = module.params.get('state', None)
name = module.params.get('name', None)
command = module.params.get('command', None)
uri = module.params.get('uri', None)
xml = module.params.get('xml', None)
autostart = module.params.get('autostart', None)
v = VirtNetwork(uri, module)
@ -494,33 +495,33 @@ def core(module):
if state and command == 'list_nets':
res = v.list_nets(state=state)
if not isinstance(res, dict):
res = { command: res }
res = {command: res}
return VIRT_SUCCESS, res
if state:
if not name:
module.fail_json(msg = "state change requires a specified name")
module.fail_json(msg="state change requires a specified name")
res['changed'] = False
if state in [ 'active' ]:
if state in ['active']:
if v.status(name) is not 'active':
res['changed'] = True
res['msg'] = v.start(name)
elif state in [ 'present' ]:
elif state in ['present']:
try:
v.get_net(name)
except EntryNotFound:
if not xml:
module.fail_json(msg = "network '" + name + "' not present, but xml not specified")
module.fail_json(msg="network '" + name + "' not present, but xml not specified")
v.define(name, xml)
res = {'changed': True, 'created': name}
elif state in [ 'inactive' ]:
elif state in ['inactive']:
entries = v.list_nets()
if name in entries:
if v.status(name) is not 'inactive':
res['changed'] = True
res['msg'] = v.destroy(name)
elif state in [ 'undefined', 'absent' ]:
elif state in ['undefined', 'absent']:
entries = v.list_nets()
if name in entries:
if v.status(name) is not 'inactive':
@ -535,10 +536,10 @@ def core(module):
if command:
if command in ENTRY_COMMANDS:
if not name:
module.fail_json(msg = "%s requires 1 argument: name" % command)
module.fail_json(msg="%s requires 1 argument: name" % command)
if command in ('define', 'modify'):
if not xml:
module.fail_json(msg = command+" requires xml argument")
module.fail_json(msg=command + " requires xml argument")
try:
v.get_net(name)
except EntryNotFound:
@ -551,13 +552,13 @@ def core(module):
return VIRT_SUCCESS, res
res = getattr(v, command)(name)
if not isinstance(res, dict):
res = { command: res }
res = {command: res}
return VIRT_SUCCESS, res
elif hasattr(v, command):
res = getattr(v, command)()
if not isinstance(res, dict):
res = { command: res }
res = {command: res}
return VIRT_SUCCESS, res
else:
@ -565,7 +566,7 @@ def core(module):
if autostart is not None:
if not name:
module.fail_json(msg = "state change requires a specified name")
module.fail_json(msg="state change requires a specified name")
res['changed'] = False
if autostart:
@ -584,16 +585,16 @@ def core(module):
def main():
module = AnsibleModule (
argument_spec = dict(
name = dict(aliases=['network']),
state = dict(choices=['active', 'inactive', 'present', 'absent']),
command = dict(choices=ALL_COMMANDS),
uri = dict(default='qemu:///system'),
xml = dict(),
autostart = dict(type='bool')
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=['network']),
state=dict(choices=['active', 'inactive', 'present', 'absent']),
command=dict(choices=ALL_COMMANDS),
uri=dict(default='qemu:///system'),
xml=dict(),
autostart=dict(type='bool')
),
supports_check_mode = True
supports_check_mode=True
)
if not HAS_VIRT:
@ -612,7 +613,7 @@ def main():
except Exception as e:
module.fail_json(msg=str(e))
if rc != 0: # something went wrong emit the msg
if rc != 0: # something went wrong emit the msg
module.fail_json(rc=rc, msg=result)
else:
module.exit_json(**result)

@ -165,49 +165,49 @@ from ansible.module_utils.basic import AnsibleModule
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE=2
VIRT_UNAVAILABLE = 2
ALL_COMMANDS = []
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete',
'undefine', 'destroy', 'get_xml', 'define', 'refresh']
HOST_COMMANDS = [ 'list_pools', 'facts', 'info' ]
HOST_COMMANDS = ['list_pools', 'facts', 'info']
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
ENTRY_STATE_ACTIVE_MAP = {
0 : "inactive",
1 : "active"
0: "inactive",
1: "active"
}
ENTRY_STATE_AUTOSTART_MAP = {
0 : "no",
1 : "yes"
0: "no",
1: "yes"
}
ENTRY_STATE_PERSISTENT_MAP = {
0 : "no",
1 : "yes"
0: "no",
1: "yes"
}
ENTRY_STATE_INFO_MAP = {
0 : "inactive",
1 : "building",
2 : "running",
3 : "degraded",
4 : "inaccessible"
0: "inactive",
1: "building",
2: "running",
3: "degraded",
4: "inaccessible"
}
ENTRY_BUILD_FLAGS_MAP = {
"new" : 0,
"repair" : 1,
"resize" : 2,
"no_overwrite" : 4,
"overwrite" : 8
"new": 0,
"repair": 1,
"resize": 2,
"no_overwrite": 4,
"overwrite": 8
}
ENTRY_DELETE_FLAGS_MAP = {
"normal" : 0,
"zeroed" : 1
"normal": 0,
"zeroed": 1
}
ALL_MODES = []
@ -283,18 +283,18 @@ class LibvirtConnection(object):
def get_status2(self, entry):
state = entry.isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
def get_status(self, entryid):
if not self.module.check_mode:
state = self.find_entry(entryid).isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
else:
try:
state = self.find_entry(entryid).isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
except:
return ENTRY_STATE_ACTIVE_MAP.get("inactive","unknown")
return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown")
def get_uuid(self, entryid):
return self.find_entry(entryid).UUIDString()
@ -378,7 +378,7 @@ class LibvirtConnection(object):
def get_autostart(self, entryid):
state = self.find_entry(entryid).autostart()
return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown")
def get_autostart2(self, entryid):
if not self.module.check_mode:
@ -405,7 +405,7 @@ class LibvirtConnection(object):
def get_persistent(self, entryid):
state = self.find_entry(entryid).isPersistent()
return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown")
def define_from_xml(self, entryid, xml):
if not self.module.check_mode:
@ -441,7 +441,7 @@ class VirtStoragePool(object):
results = []
for entry in self.list_pools():
state_blurb = self.conn.get_status(entry)
results.append("%s %s" % (entry,state_blurb))
results.append("%s %s" % (entry, state_blurb))
return results
def autostart(self, entryid):
@ -478,10 +478,10 @@ class VirtStoragePool(object):
return self.conn.define_from_xml(entryid, xml)
def build(self, entryid, flags):
return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags,0))
return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags, 0))
def delete(self, entryid, flags):
return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags,0))
return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags, 0))
def refresh(self, entryid):
return self.conn.refresh(entryid)
@ -501,10 +501,10 @@ class VirtStoragePool(object):
# assume the other end of the xmlrpc connection can figure things
# out or doesn't care.
results[entry] = {
"status" : ENTRY_STATE_INFO_MAP.get(data[0],"unknown"),
"size_total" : str(data[1]),
"size_used" : str(data[2]),
"size_available" : str(data[3]),
"status": ENTRY_STATE_INFO_MAP.get(data[0], "unknown"),
"size_total": str(data[1]),
"size_used": str(data[2]),
"size_available": str(data[3]),
}
results[entry]["autostart"] = self.conn.get_autostart(entry)
results[entry]["persistent"] = self.conn.get_persistent(entry)
@ -555,13 +555,13 @@ class VirtStoragePool(object):
def core(module):
state = module.params.get('state', None)
name = module.params.get('name', None)
command = module.params.get('command', None)
uri = module.params.get('uri', None)
xml = module.params.get('xml', None)
state = module.params.get('state', None)
name = module.params.get('name', None)
command = module.params.get('command', None)
uri = module.params.get('uri', None)
xml = module.params.get('xml', None)
autostart = module.params.get('autostart', None)
mode = module.params.get('mode', None)
mode = module.params.get('mode', None)
v = VirtStoragePool(uri, module)
res = {}
@ -569,40 +569,40 @@ def core(module):
if state and command == 'list_pools':
res = v.list_pools(state=state)
if not isinstance(res, dict):
res = { command: res }
res = {command: res}
return VIRT_SUCCESS, res
if state:
if not name:
module.fail_json(msg = "state change requires a specified name")
module.fail_json(msg="state change requires a specified name")
res['changed'] = False
if state in [ 'active' ]:
if state in ['active']:
if v.status(name) is not 'active':
res['changed'] = True
res['msg'] = v.start(name)
elif state in [ 'present' ]:
elif state in ['present']:
try:
v.get_pool(name)
except EntryNotFound:
if not xml:
module.fail_json(msg = "storage pool '" + name + "' not present, but xml not specified")
module.fail_json(msg="storage pool '" + name + "' not present, but xml not specified")
v.define(name, xml)
res = {'changed': True, 'created': name}
elif state in [ 'inactive' ]:
elif state in ['inactive']:
entries = v.list_pools()
if name in entries:
if v.status(name) is not 'inactive':
res['changed'] = True
res['msg'] = v.destroy(name)
elif state in [ 'undefined', 'absent' ]:
elif state in ['undefined', 'absent']:
entries = v.list_pools()
if name in entries:
if v.status(name) is not 'inactive':
v.destroy(name)
res['changed'] = True
res['msg'] = v.undefine(name)
elif state in [ 'deleted' ]:
elif state in ['deleted']:
entries = v.list_pools()
if name in entries:
if v.status(name) is not 'inactive':
@ -618,10 +618,10 @@ def core(module):
if command:
if command in ENTRY_COMMANDS:
if not name:
module.fail_json(msg = "%s requires 1 argument: name" % command)
module.fail_json(msg="%s requires 1 argument: name" % command)
if command == 'define':
if not xml:
module.fail_json(msg = "define requires xml argument")
module.fail_json(msg="define requires xml argument")
try:
v.get_pool(name)
except EntryNotFound:
@ -631,22 +631,22 @@ def core(module):
elif command == 'build':
res = v.build(name, mode)
if not isinstance(res, dict):
res = { 'changed': True, command: res }
res = {'changed': True, command: res}
return VIRT_SUCCESS, res
elif command == 'delete':
res = v.delete(name, mode)
if not isinstance(res, dict):
res = { 'changed': True, command: res }
res = {'changed': True, command: res}
return VIRT_SUCCESS, res
res = getattr(v, command)(name)
if not isinstance(res, dict):
res = { command: res }
res = {command: res}
return VIRT_SUCCESS, res
elif hasattr(v, command):
res = getattr(v, command)()
if not isinstance(res, dict):
res = { command: res }
res = {command: res}
return VIRT_SUCCESS, res
else:
@ -654,7 +654,7 @@ def core(module):
if autostart is not None:
if not name:
module.fail_json(msg = "state change requires a specified name")
module.fail_json(msg="state change requires a specified name")
res['changed'] = False
if autostart:
@ -673,17 +673,17 @@ def core(module):
def main():
module = AnsibleModule (
argument_spec = dict(
name = dict(aliases=['pool']),
state = dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
command = dict(choices=ALL_COMMANDS),
uri = dict(default='qemu:///system'),
xml = dict(),
autostart = dict(type='bool'),
mode = dict(choices=ALL_MODES),
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=['pool']),
state=dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
command=dict(choices=ALL_COMMANDS),
uri=dict(default='qemu:///system'),
xml=dict(),
autostart=dict(type='bool'),
mode=dict(choices=ALL_MODES),
),
supports_check_mode = True
supports_check_mode=True
)
if not HAS_VIRT:
@ -702,7 +702,7 @@ def main():
except Exception as e:
module.fail_json(msg=str(e))
if rc != 0: # something went wrong emit the msg
if rc != 0: # something went wrong emit the msg
module.fail_json(rc=rc, msg=result)
else:
module.exit_json(**result)

@ -137,12 +137,14 @@ def change_keys(recs, key='uuid', filter_func=None):
return new_recs
def get_host(session):
"""Get the host"""
host_recs = session.xenapi.host.get_all()
# We only have one host, so just return its entry
return session.xenapi.host.get_record(host_recs[0])
def get_vms(session):
xs_vms = {}
recs = session.xenapi.VM.get_all()
@ -165,6 +167,7 @@ def get_srs(session):
xs_srs[sr['name_label']] = sr
return xs_srs
def main():
module = AnsibleModule({})

@ -137,20 +137,20 @@ from ansible.module_utils.openstack import openstack_full_argument_spec, opensta
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
id = dict(default=None),
checksum = dict(default=None),
disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'vhdx', 'ploop']),
container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova', 'docker']),
owner = dict(default=None),
min_disk = dict(type='int', default=0),
min_ram = dict(type='int', default=0),
is_public = dict(type='bool', default=False),
filename = dict(default=None),
ramdisk = dict(default=None),
kernel = dict(default=None),
properties = dict(type='dict', default={}),
state = dict(default='present', choices=['absent', 'present']),
name=dict(required=True),
id=dict(default=None),
checksum=dict(default=None),
disk_format=dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'vhdx', 'ploop']),
container_format=dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova', 'docker']),
owner=dict(default=None),
min_disk=dict(type='int', default=0),
min_ram=dict(type='int', default=0),
is_public=dict(type='bool', default=False),
filename=dict(default=None),
ramdisk=dict(default=None),
kernel=dict(default=None),
properties=dict(type='dict', default={}),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
@ -163,13 +163,13 @@ def main():
changed = False
if module.params['checksum']:
image = cloud.get_image(name_or_id=None,filters={'checksum': module.params['checksum']})
image = cloud.get_image(name_or_id=None, filters={'checksum': module.params['checksum']})
else:
image = cloud.get_image(name_or_id=module.params['name'])
if module.params['state'] == 'present':
if not image:
kwargs={}
kwargs = {}
if module.params['id'] is not None:
kwargs['id'] = module.params['id']
image = cloud.create_image(

@ -109,11 +109,11 @@ def _system_state_change(module, keypair):
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
public_key = dict(default=None),
public_key_file = dict(default=None),
state = dict(default='present',
choices=['absent', 'present']),
name=dict(required=True),
public_key=dict(default=None),
public_key_file=dict(default=None),
state=dict(default='present',
choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(

@ -194,21 +194,21 @@ def _system_state_change(module, flavor):
def main():
argument_spec = openstack_full_argument_spec(
state = dict(required=False, default='present',
choices=['absent', 'present']),
name = dict(required=False),
state=dict(required=False, default='present',
choices=['absent', 'present']),
name=dict(required=False),
# required when state is 'present'
ram = dict(required=False, type='int'),
vcpus = dict(required=False, type='int'),
disk = dict(required=False, type='int'),
ephemeral = dict(required=False, default=0, type='int'),
swap = dict(required=False, default=0, type='int'),
rxtx_factor = dict(required=False, default=1.0, type='float'),
is_public = dict(required=False, default=True, type='bool'),
flavorid = dict(required=False, default="auto"),
extra_specs = dict(required=False, default=None, type='dict'),
ram=dict(required=False, type='int'),
vcpus=dict(required=False, type='int'),
disk=dict(required=False, type='int'),
ephemeral=dict(required=False, default=0, type='int'),
swap=dict(required=False, default=0, type='int'),
rxtx_factor=dict(required=False, default=1.0, type='float'),
is_public=dict(required=False, default=True, type='bool'),
flavorid=dict(required=False, default="auto"),
extra_specs=dict(required=False, default=None, type='dict'),
)
module_kwargs = openstack_module_kwargs()
@ -247,9 +247,9 @@ def main():
rxtx_factor=module.params['rxtx_factor'],
is_public=module.params['is_public']
)
changed=True
changed = True
else:
changed=False
changed = False
old_extra_specs = flavor['extra_specs']
new_extra_specs = dict([(k, str(v)) for k, v in extra_specs.items()])

@ -306,14 +306,17 @@ def _get_volume_quotas(cloud, project):
return cloud.get_volume_quotas(project)
def _get_network_quotas(cloud, project):
return cloud.get_network_quotas(project)
def _get_compute_quotas(cloud, project):
return cloud.get_compute_quotas(project)
def _get_quotas(module, cloud, project):
quota = {}
@ -334,6 +337,7 @@ def _get_quotas(module, cloud, project):
return quota
def _scrub_results(quota):
filter_attr = [
@ -350,6 +354,7 @@ def _scrub_results(quota):
return quota
def _system_state_change_details(module, project_quota_output):
quota_change_request = {}
@ -368,6 +373,7 @@ def _system_state_change_details(module, project_quota_output):
return (changes_required, quota_change_request)
def _system_state_change(module, project_quota_output):
"""
Determine if changes are required to the current project quota.
@ -386,6 +392,7 @@ def _system_state_change(module, project_quota_output):
else:
return False
def main():
argument_spec = openstack_full_argument_spec(
@ -427,8 +434,8 @@ def main():
)
module = AnsibleModule(argument_spec,
supports_check_mode=True
)
supports_check_mode=True
)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
@ -437,7 +444,7 @@ def main():
cloud_params = dict(module.params)
cloud = shade.operator_cloud(**cloud_params)
#In order to handle the different volume types we update module params after.
# In order to handle the different volume types we update module params after.
dynamic_types = [
'gigabytes_types',
'snapshots_types',
@ -448,22 +455,22 @@ def main():
for k, v in module.params[dynamic_type].items():
module.params[k] = int(v)
#Get current quota values
# Get current quota values
project_quota_output = _get_quotas(module, cloud, cloud_params['name'])
changes_required = False
if module.params['state'] == "absent":
#If a quota state is set to absent we should assume there will be changes.
#The default quota values are not accessible so we can not determine if
#no changes will occur or not.
# If a quota state is set to absent we should assume there will be changes.
# The default quota values are not accessible so we can not determine if
# no changes will occur or not.
if module.check_mode:
module.exit_json(changed=True)
#Calling delete_network_quotas when a quota has not been set results
#in an error, according to the shade docs it should return the
#current quota.
#The following error string is returned:
#network client call failed: Quota for tenant 69dd91d217e949f1a0b35a4b901741dc could not be found.
# Calling delete_network_quotas when a quota has not been set results
# in an error, according to the shade docs it should return the
# current quota.
# The following error string is returned:
# network client call failed: Quota for tenant 69dd91d217e949f1a0b35a4b901741dc could not be found.
neutron_msg1 = "network client call failed: Quota for tenant"
neutron_msg2 = "could not be found"
@ -495,7 +502,7 @@ def main():
quota_call = getattr(cloud, 'set_%s_quotas' % (quota_type))
quota_call(cloud_params['name'], **quota_change_request[quota_type])
#Get quota state post changes for validation
# Get quota state post changes for validation
project_quota_update = _get_quotas(module, cloud, cloud_params['name'])
if project_quota_output == project_quota_update:
@ -504,8 +511,8 @@ def main():
project_quota_output = project_quota_update
module.exit_json(changed=changes_required,
openstack_quotas=project_quota_output
)
openstack_quotas=project_quota_output
)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)

@ -212,8 +212,8 @@ def _ports_match(protocol, module_min, module_max, rule_min, rule_max):
if ((module_min is None and module_max is None) and
(rule_min and int(rule_min) == 1 and
rule_max and int(rule_max) == 65535)):
# (None, None) == (1, 65535)
return True
# (None, None) == (1, 65535)
return True
# Sanity check to make sure we don't have type comparison issues.
if module_min:
@ -270,21 +270,21 @@ def _system_state_change(module, secgroup, remotegroup):
def main():
argument_spec = openstack_full_argument_spec(
security_group = dict(required=True),
security_group=dict(required=True),
# NOTE(Shrews): None is an acceptable protocol value for
# Neutron, but Nova will balk at this.
protocol = dict(default=None,
choices=[None, 'tcp', 'udp', 'icmp', '112']),
port_range_min = dict(required=False, type='int'),
port_range_max = dict(required=False, type='int'),
remote_ip_prefix = dict(required=False, default=None),
remote_group = dict(required=False, default=None),
ethertype = dict(default='IPv4',
choices=['IPv4', 'IPv6']),
direction = dict(default='ingress',
choices=['egress', 'ingress']),
state = dict(default='present',
choices=['absent', 'present']),
protocol=dict(default=None,
choices=[None, 'tcp', 'udp', 'icmp', '112']),
port_range_min=dict(required=False, type='int'),
port_range_max=dict(required=False, type='int'),
remote_ip_prefix=dict(required=False, default=None),
remote_group=dict(required=False, default=None),
ethertype=dict(default='IPv4',
choices=['IPv4', 'IPv6']),
direction=dict(default='ingress',
choices=['egress', 'ingress']),
state=dict(default='present',
choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
@ -312,7 +312,7 @@ def main():
if remote_group:
remotegroup = cloud.get_security_group(remote_group)
else:
remotegroup = { 'id' : None }
remotegroup = {'id': None}
if module.check_mode:
module.exit_json(changed=_system_state_change(module, secgroup, remotegroup))

@ -433,6 +433,7 @@ def _parse_nics(nics):
else:
yield net
def _network_args(module, cloud):
args = []
nics = module.params['nics']
@ -685,31 +686,31 @@ def _get_server_state(module, cloud):
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
image = dict(default=None),
image_exclude = dict(default='(deprecated)'),
flavor = dict(default=None),
flavor_ram = dict(default=None, type='int'),
flavor_include = dict(default=None),
key_name = dict(default=None),
security_groups = dict(default=['default'], type='list'),
network = dict(default=None),
nics = dict(default=[], type='list'),
meta = dict(default=None, type='raw'),
userdata = dict(default=None, aliases=['user_data']),
config_drive = dict(default=False, type='bool'),
auto_ip = dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']),
floating_ips = dict(default=None, type='list'),
floating_ip_pools = dict(default=None, type='list'),
volume_size = dict(default=False, type='int'),
boot_from_volume = dict(default=False, type='bool'),
boot_volume = dict(default=None, aliases=['root_volume']),
terminate_volume = dict(default=False, type='bool'),
volumes = dict(default=[], type='list'),
scheduler_hints = dict(default=None, type='dict'),
state = dict(default='present', choices=['absent', 'present']),
delete_fip = dict(default=False, type='bool'),
reuse_ips = dict(default=True, type='bool'),
name=dict(required=True),
image=dict(default=None),
image_exclude=dict(default='(deprecated)'),
flavor=dict(default=None),
flavor_ram=dict(default=None, type='int'),
flavor_include=dict(default=None),
key_name=dict(default=None),
security_groups=dict(default=['default'], type='list'),
network=dict(default=None),
nics=dict(default=[], type='list'),
meta=dict(default=None, type='raw'),
userdata=dict(default=None, aliases=['user_data']),
config_drive=dict(default=False, type='bool'),
auto_ip=dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']),
floating_ips=dict(default=None, type='list'),
floating_ip_pools=dict(default=None, type='list'),
volume_size=dict(default=False, type='int'),
boot_from_volume=dict(default=False, type='bool'),
boot_volume=dict(default=None, aliases=['root_volume']),
terminate_volume=dict(default=False, type='bool'),
volumes=dict(default=[], type='list'),
scheduler_hints=dict(default=None, type='dict'),
state=dict(default='present', choices=['absent', 'present']),
delete_fip=dict(default=False, type='bool'),
reuse_ips=dict(default=True, type='bool'),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[

@ -1,5 +1,5 @@
#!/usr/bin/python
#coding: utf-8 -*-
# coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
# (c) 2016, Steve Baker <sbaker@redhat.com>
@ -166,12 +166,12 @@ from ansible.module_utils.openstack import openstack_full_argument_spec, opensta
def _create_stack(module, stack, cloud):
try:
stack = cloud.create_stack(module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
wait=True,
rollback=module.params['rollback'],
**module.params['parameters'])
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
wait=True,
rollback=module.params['rollback'],
**module.params['parameters'])
stack = cloud.get_stack(stack.id, None)
if stack.stack_status == 'CREATE_COMPLETE':
@ -181,6 +181,7 @@ def _create_stack(module, stack, cloud):
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
def _update_stack(module, stack, cloud):
try:
stack = cloud.update_stack(
@ -195,11 +196,12 @@ def _update_stack(module, stack, cloud):
if stack['stack_status'] == 'UPDATE_COMPLETE':
return stack
else:
module.fail_json(msg = "Failure in updating stack: %s" %
module.fail_json(msg="Failure in updating stack: %s" %
stack['stack_status_reason'])
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
def _system_state_change(module, stack, cloud):
state = module.params['state']
if state == 'present':
@ -209,6 +211,7 @@ def _system_state_change(module, stack, cloud):
return True
return False
def main():
argument_spec = openstack_full_argument_spec(

@ -162,7 +162,7 @@ def main():
if not HAS_OVH:
module.fail_json(msg='ovh-api python module'
'is required to run this module ')
'is required to run this module ')
# Get parameters
name = module.params.get('name')

@ -113,7 +113,7 @@ PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
def serialize_sshkey(sshkey):
sshkey_data = {}
copy_keys = ['id', 'key', 'label','fingerprint']
copy_keys = ['id', 'key', 'label', 'fingerprint']
for name in copy_keys:
sshkey_data[name] = getattr(sshkey, name)
return sshkey_data
@ -132,7 +132,7 @@ def load_key_string(key_str):
key_str = key_str.strip()
ret_dict['key'] = key_str
cut_key = key_str.split()
if len(cut_key) in [2,3]:
if len(cut_key) in [2, 3]:
if len(cut_key) == 3:
ret_dict['label'] = cut_key[2]
else:
@ -165,7 +165,7 @@ def get_sshkey_selector(module):
return k.key == select_dict['key']
else:
# if key string not specified, all the fields must match
return all([select_dict[f] == getattr(k,f) for f in select_dict])
return all([select_dict[f] == getattr(k, f) for f in select_dict])
return selector
@ -188,10 +188,10 @@ def act_on_sshkeys(target_state, module, packet_conn):
newkey['label'] = module.params.get('label')
for param in ('label', 'key'):
if param not in newkey:
_msg=("If you want to ensure a key is present, you must "
"supply both a label and a key string, either in "
"module params, or in a key file. %s is missing"
% param)
_msg = ("If you want to ensure a key is present, you must "
"supply both a label and a key string, either in "
"module params, or in a key file. %s is missing"
% param)
raise Exception(_msg)
matching_sshkeys = []
new_key_response = packet_conn.create_ssh_key(
@ -208,7 +208,7 @@ def act_on_sshkeys(target_state, module, packet_conn):
except Exception as e:
_msg = ("while trying to remove sshkey %s, id %s %s, "
"got error: %s" %
(k.label, k.id, target_state, e))
(k.label, k.id, target_state, e))
raise Exception(_msg)
return {
@ -220,7 +220,7 @@ def act_on_sshkeys(target_state, module, packet_conn):
def main():
module = AnsibleModule(
argument_spec=dict(
state = dict(choices=['present', 'absent'], default='present'),
state=dict(choices=['present', 'absent'], default='present'),
auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
no_log=True),
label=dict(type='str', aliases=['name'], default=None),
@ -236,16 +236,16 @@ def main():
('key', 'fingerprint'),
('key', 'id'),
('key_file', 'key'),
]
]
)
if not HAS_PACKET_SDK:
module.fail_json(msg='packet required for this module')
if not module.params.get('auth_token'):
_fail_msg = ( "if Packet API token is not in environment variable %s, "
"the auth_token parameter is required" %
PACKET_API_TOKEN_ENV_VAR)
_fail_msg = ("if Packet API token is not in environment variable %s, "
"the auth_token parameter is required" %
PACKET_API_TOKEN_ENV_VAR)
module.fail_json(msg=_fail_msg)
auth_token = module.params.get('auth_token')
@ -254,7 +254,7 @@ def main():
state = module.params.get('state')
if state in ['present','absent']:
if state in ['present', 'absent']:
try:
module.exit_json(**act_on_sshkeys(state, module, packet_conn))
except Exception as e:

@ -238,7 +238,7 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
) + '" to complete.')
def _create_machine(module, profitbricks, datacenter, name):
@ -290,7 +290,7 @@ def _create_machine(module, profitbricks, datacenter, name):
n = NIC(
lan=int(lan)
)
)
s = Server(
name=name,
@ -299,7 +299,7 @@ def _create_machine(module, profitbricks, datacenter, name):
cpu_family=cpu_family,
create_volumes=[v],
nics=[n],
)
)
try:
create_server_response = profitbricks.create_server(
@ -341,7 +341,7 @@ def _create_datacenter(module, profitbricks):
i = Datacenter(
name=datacenter,
location=location
)
)
try:
datacenter_response = profitbricks.create_datacenter(datacenter=i)
@ -624,7 +624,7 @@ def main():
if state == 'absent':
if not module.params.get('datacenter'):
module.fail_json(msg='datacenter parameter is required ' +
'for running or stopping machines.')
'for running or stopping machines.')
try:
(changed) = remove_virtual_machine(module, profitbricks)
@ -635,7 +635,7 @@ def main():
elif state in ('running', 'stopped'):
if not module.params.get('datacenter'):
module.fail_json(msg='datacenter parameter is required for ' +
'running or stopping machines.')
'running or stopping machines.')
try:
(changed) = startstop_machine(module, profitbricks, state)
module.exit_json(changed=changed)
@ -649,10 +649,10 @@ def main():
module.fail_json(msg='image parameter is required for new instance')
if not module.params.get('subscription_user'):
module.fail_json(msg='subscription_user parameter is ' +
'required for new instance')
'required for new instance')
if not module.params.get('subscription_password'):
module.fail_json(msg='subscription_password parameter is ' +
'required for new instance')
'required for new instance')
try:
(machine_dict_array) = create_virtual_machine(module, profitbricks)

@ -118,7 +118,8 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
) + '" to complete.')
def _remove_datacenter(module, profitbricks, datacenter):
try:
@ -126,6 +127,7 @@ def _remove_datacenter(module, profitbricks, datacenter):
except Exception as e:
module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
def create_datacenter(module, profitbricks):
"""
Creates a Datacenter
@ -148,7 +150,7 @@ def create_datacenter(module, profitbricks):
name=name,
location=location,
description=description
)
)
try:
datacenter_response = profitbricks.create_datacenter(datacenter=i)
@ -166,6 +168,7 @@ def create_datacenter(module, profitbricks):
except Exception as e:
module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
def remove_datacenter(module, profitbricks):
"""
Removes a Datacenter.
@ -197,6 +200,7 @@ def remove_datacenter(module, profitbricks):
return changed
def main():
module = AnsibleModule(
argument_spec=dict(

@ -121,7 +121,8 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
) + '" to complete.')
def create_nic(module, profitbricks):
"""
@ -173,6 +174,7 @@ def create_nic(module, profitbricks):
except Exception as e:
module.fail_json(msg="failed to create the NIC: %s" % str(e))
def delete_nic(module, profitbricks):
"""
Removes a NIC
@ -228,12 +230,13 @@ def delete_nic(module, profitbricks):
except Exception as e:
module.fail_json(msg="failed to remove the NIC: %s" % str(e))
def main():
module = AnsibleModule(
argument_spec=dict(
datacenter=dict(),
server=dict(),
name=dict(default=str(uuid.uuid4()).replace('-','')[:10]),
name=dict(default=str(uuid.uuid4()).replace('-', '')[:10]),
lan=dict(),
subscription_user=dict(),
subscription_password=dict(no_log=True),
@ -255,7 +258,6 @@ def main():
if not module.params.get('server'):
module.fail_json(msg='server parameter is required')
subscription_user = module.params.get('subscription_user')
subscription_password = module.params.get('subscription_password')

@ -170,7 +170,7 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
) + '" to complete.')
def _create_volume(module, profitbricks, datacenter, name):
@ -194,7 +194,7 @@ def _create_volume(module, profitbricks, datacenter, name):
ssh_keys=ssh_keys,
disk_type=disk_type,
licence_type=licence_type
)
)
volume_response = profitbricks.create_volume(datacenter, v)

@ -118,7 +118,8 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
) + '" to complete.')
def attach_volume(module, profitbricks):
"""
@ -150,7 +151,7 @@ def attach_volume(module, profitbricks):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server= s['id']
server = s['id']
break
# Locate UUID for Volume
@ -163,6 +164,7 @@ def attach_volume(module, profitbricks):
return profitbricks.attach_volume(datacenter, server, volume)
def detach_volume(module, profitbricks):
"""
Detaches a volume.
@ -193,7 +195,7 @@ def detach_volume(module, profitbricks):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server= s['id']
server = s['id']
break
# Locate UUID for Volume
@ -206,6 +208,7 @@ def detach_volume(module, profitbricks):
return profitbricks.detach_volume(datacenter, server, volume)
def main():
module = AnsibleModule(
argument_spec=dict(

@ -93,7 +93,7 @@ from ansible.module_utils.rax import (NON_CALLABLES,
rax_required_together,
rax_to_dict,
setup_rax_module,
)
)
def cloud_block_storage_attachments(module, state, volume, server, device,

@ -148,7 +148,7 @@ def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
module.fail_json(changed=False, action=action,
msg='The new volume size must be larger than '
'the current volume size',
cdb=rax_to_dict(instance))
cdb=rax_to_dict(instance))
instance.resize_volume(volume)
changed = True

@ -141,7 +141,7 @@ from ansible.module_utils.rax import (CLB_ALGORITHMS,
rax_required_together,
rax_to_dict,
setup_rax_module,
)
)
def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,

@ -11,7 +11,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
'supported_by': 'community'}
DOCUMENTATION='''
DOCUMENTATION = '''
module: rax_clb_ssl
short_description: Manage SSL termination for a Rackspace Cloud Load Balancer.
description:
@ -100,7 +100,8 @@ from ansible.module_utils.rax import (rax_argument_spec,
rax_required_together,
rax_to_dict,
setup_rax_module,
)
)
def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
certificate, intermediate_certificate, secure_port,
@ -222,6 +223,7 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
else:
module.fail_json(**result)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(dict(

@ -72,7 +72,7 @@ from ansible.module_utils.rax import (rax_argument_spec,
rax_required_together,
rax_to_dict,
setup_rax_module,
)
)
def rax_dns(module, comment, email, name, state, ttl):

@ -128,7 +128,7 @@ from ansible.module_utils.rax import (rax_argument_spec,
rax_required_together,
rax_to_dict,
setup_rax_module,
)
)
def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,

@ -61,7 +61,7 @@ from ansible.module_utils.rax import (rax_argument_spec,
rax_required_together,
rax_to_dict,
setup_rax_module,
)
)
def rax_facts(module, address, name, server_id):

@ -91,7 +91,7 @@ from ansible.module_utils.rax import (rax_argument_spec,
rax_required_together,
rax_to_dict,
setup_rax_module,
)
)
def rax_keypair(module, name, public_key, state):

@ -180,6 +180,7 @@ def alarm(module, state, label, entity_id, check_id, notification_plan_id, crite
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(

@ -256,6 +256,7 @@ def cloud_check(module, state, entity_id, label, check_type,
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save