ec2_asg: max_instance_lifetime and respect wait on replace (#66863)

* ec2_asg: max_instance_lifetime and respect wait on replace

* ec2_asg: max_instance_lifetime integration tests

* ec2_asg: address review comments

@@ -0,0 +1,5 @@
+minor_changes:
+  - 'ec2_asg: Migrated to AnsibleAWSModule'
+  - 'ec2_asg: Add support for Max Instance Lifetime'
+bugfixes:
+  - 'ec2_asg: Ensure "wait" is honored during replace operations'

@@ -81,6 +81,13 @@ options:
     description:
       - Maximum number of instances in group, if unspecified then the current group value will be used.
     type: int
+  max_instance_lifetime:
+    description:
+      - The maximum amount of time, in seconds, that an instance can be in service.
+      - Maximum instance lifetime must be equal to 0, between 604800 and 31536000 seconds (inclusive), or not specified.
+      - Value of 0 removes lifetime restriction.
+    version_added: "2.10"
+    type: int
   mixed_instances_policy:
     description:
       - A mixed instance policy to use for the ASG.
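As a usage illustration (not part of the diff), a minimal playbook sketch of the new option; the group and launch configuration names are made up, and the values follow the constraints documented above (604800 seconds is the lowest non-zero value accepted, 0 clears the setting):

- name: cap instance age at seven days (hypothetical names)
  ec2_asg:
    name: example-asg
    launch_config_name: example-lc
    min_size: 1
    max_size: 3
    max_instance_lifetime: 604800

- name: remove the lifetime restriction again
  ec2_asg:
    name: example-asg
    launch_config_name: example-lc
    max_instance_lifetime: 0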
@@ -365,7 +372,6 @@ EXAMPLES = '''
     tags:
       - environment: production
         propagate_at_launch: no
-
 '''

 RETURN = '''
@@ -452,6 +458,11 @@ load_balancers:
     returned: success
     type: list
     sample: ["elb-webapp-prod"]
+max_instance_lifetime:
+    description: The maximum amount of time, in seconds, that an instance can be in service.
+    returned: success
+    type: int
+    sample: 604800
 max_size:
     description: Maximum size of group
     returned: success
@@ -511,7 +522,7 @@ target_group_names:
 termination_policies:
     description: A list of termination policies for the group.
     returned: success
-    type: str
+    type: list
     sample: ["Default"]
 unhealthy_instances:
     description: Number of instances in an unhealthy state
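For reference, a hedged sketch of reading the new return value from a registered result; the task names and the asg_result variable are illustrative, not part of the commit:

- name: create or update the group and capture the module result
  ec2_asg:
    name: example-asg
    launch_config_name: example-lc
    max_instance_lifetime: 604800
  register: asg_result

- name: report the lifetime the module returned
  debug:
    msg: "max_instance_lifetime is {{ asg_result.max_instance_lifetime }} seconds"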
@@ -544,8 +555,11 @@ import time
 import traceback

 from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, HAS_BOTO3, camel_dict_to_snake_dict, get_aws_connection_info, AWSRetry
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import (
+    AWSRetry,
+    camel_dict_to_snake_dict
+)

 try:
     import botocore
@@ -556,8 +570,9 @@ from ansible.module_utils.aws.core import AnsibleAWSModule

 ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity',
                   'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName',
-                  'LoadBalancerNames', 'MaxSize', 'MinSize', 'AutoScalingGroupName', 'PlacementGroup',
-                  'TerminationPolicies', 'VPCZoneIdentifier')
+                  'LoadBalancerNames', 'MaxInstanceLifetime', 'MaxSize', 'MinSize',
+                  'AutoScalingGroupName', 'PlacementGroup', 'TerminationPolicies',
+                  'VPCZoneIdentifier')

 INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
@@ -693,37 +708,37 @@ def enforce_required_arguments_for_create():

 def get_properties(autoscaling_group):
-    properties = dict()
-    properties['healthy_instances'] = 0
-    properties['in_service_instances'] = 0
-    properties['unhealthy_instances'] = 0
-    properties['pending_instances'] = 0
-    properties['viable_instances'] = 0
-    properties['terminating_instances'] = 0
+    properties = dict(
+        healthy_instances=0,
+        in_service_instances=0,
+        unhealthy_instances=0,
+        pending_instances=0,
+        viable_instances=0,
+        terminating_instances=0
+    )
     instance_facts = dict()
     autoscaling_group_instances = autoscaling_group.get('Instances')
+
     if autoscaling_group_instances:
         properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances]
         for i in autoscaling_group_instances:
-            if i.get('LaunchConfigurationName'):
-                instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'],
-                                                   'lifecycle_state': i['LifecycleState'],
-                                                   'launch_config_name': i['LaunchConfigurationName']}
-            elif i.get('LaunchTemplate'):
-                instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'],
-                                                   'lifecycle_state': i['LifecycleState'],
-                                                   'launch_template': i['LaunchTemplate']}
-            else:
-                instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'],
-                                                   'lifecycle_state': i['LifecycleState']}
+            instance_facts[i['InstanceId']] = {
+                'health_status': i['HealthStatus'],
+                'lifecycle_state': i['LifecycleState']
+            }
+            if 'LaunchConfigurationName' in i:
+                instance_facts[i['InstanceId']]['launch_config_name'] = i['LaunchConfigurationName']
+            elif 'LaunchTemplate' in i:
+                instance_facts[i['InstanceId']]['launch_template'] = i['LaunchTemplate']
             if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService':
                 properties['viable_instances'] += 1
             if i['HealthStatus'] == 'Healthy':
                 properties['healthy_instances'] += 1
             else:
                 properties['unhealthy_instances'] += 1
             if i['LifecycleState'] == 'InService':
                 properties['in_service_instances'] += 1
             if i['LifecycleState'] == 'Terminating':
@@ -739,11 +754,12 @@ def get_properties(autoscaling_group):
     properties['created_time'] = autoscaling_group.get('CreatedTime')
     properties['instance_facts'] = instance_facts
     properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames')
-    if autoscaling_group.get('LaunchConfigurationName'):
+    if 'LaunchConfigurationName' in autoscaling_group:
         properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName')
     else:
         properties['launch_template'] = autoscaling_group.get('LaunchTemplate')
     properties['tags'] = autoscaling_group.get('Tags')
+    properties['max_instance_lifetime'] = autoscaling_group.get('MaxInstanceLifetime')
     properties['min_size'] = autoscaling_group.get('MinSize')
     properties['max_size'] = autoscaling_group.get('MaxSize')
     properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity')
@@ -764,19 +780,19 @@ def get_properties(autoscaling_group):
     properties['metrics_collection'] = metrics

     if properties['target_group_arns']:
-        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
-        elbv2_connection = boto3_conn(module,
-                                      conn_type='client',
-                                      resource='elbv2',
-                                      region=region,
-                                      endpoint=ec2_url,
-                                      **aws_connect_params)
+        elbv2_connection = module.client('elbv2')
         tg_paginator = elbv2_connection.get_paginator('describe_target_groups')
-        tg_result = tg_paginator.paginate(TargetGroupArns=properties['target_group_arns']).build_full_result()
+        tg_result = tg_paginator.paginate(
+            TargetGroupArns=properties['target_group_arns']
+        ).build_full_result()
         target_groups = tg_result['TargetGroups']
     else:
         target_groups = []
-    properties['target_group_names'] = [tg['TargetGroupName'] for tg in target_groups]
+
+    properties['target_group_names'] = [
+        tg['TargetGroupName']
+        for tg in target_groups
+    ]

     return properties
@@ -822,17 +838,11 @@ def get_launch_object(connection, ec2_connection):

 def elb_dreg(asg_connection, group_name, instance_id):
-    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
     as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
     wait_timeout = module.params.get('wait_timeout')
     count = 1
     if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB':
-        elb_connection = boto3_conn(module,
-                                    conn_type='client',
-                                    resource='elb',
-                                    region=region,
-                                    endpoint=ec2_url,
-                                    **aws_connect_params)
+        elb_connection = module.client('elb')
     else:
         return
@@ -925,7 +935,6 @@ def tg_healthy(asg_connection, elbv2_connection, group_name):

 def wait_for_elb(asg_connection, group_name):
-    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
     wait_timeout = module.params.get('wait_timeout')

     # if the health_check_type is ELB, we want to query the ELBs directly for instance
@@ -934,12 +943,7 @@ def wait_for_elb(asg_connection, group_name):
     if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB':
         module.debug("Waiting for ELB to consider instances healthy.")
-        elb_connection = boto3_conn(module,
-                                    conn_type='client',
-                                    resource='elb',
-                                    region=region,
-                                    endpoint=ec2_url,
-                                    **aws_connect_params)
+        elb_connection = module.client('elb')

         wait_timeout = time.time() + wait_timeout
         healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
@@ -955,7 +959,6 @@ def wait_for_elb(asg_connection, group_name):

 def wait_for_target_group(asg_connection, group_name):
-    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
     wait_timeout = module.params.get('wait_timeout')

     # if the health_check_type is ELB, we want to query the ELBs directly for instance
@@ -964,12 +967,7 @@ def wait_for_target_group(asg_connection, group_name):
     if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB':
         module.debug("Waiting for Target Group to consider instances healthy.")
-        elbv2_connection = boto3_conn(module,
-                                      conn_type='client',
-                                      resource='elbv2',
-                                      region=region,
-                                      endpoint=ec2_url,
-                                      **aws_connect_params)
+        elbv2_connection = module.client('elbv2')

         wait_timeout = time.time() + wait_timeout
         healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
@@ -1016,6 +1014,7 @@ def create_autoscaling_group(connection):
     mixed_instances_policy = module.params.get('mixed_instances_policy')
     min_size = module.params['min_size']
     max_size = module.params['max_size']
+    max_instance_lifetime = module.params.get('max_instance_lifetime')
     placement_group = module.params.get('placement_group')
     desired_capacity = module.params.get('desired_capacity')
     vpc_zone_identifier = module.params.get('vpc_zone_identifier')
@@ -1031,19 +1030,14 @@ def create_autoscaling_group(connection):
     metrics_collection = module.params.get('metrics_collection')
     metrics_granularity = module.params.get('metrics_granularity')
     metrics_list = module.params.get('metrics_list')

     try:
         as_groups = describe_autoscaling_groups(connection, group_name)
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
         module.fail_json(msg="Failed to describe auto scaling groups.",
                          exception=traceback.format_exc())

-    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
-    ec2_connection = boto3_conn(module,
-                                conn_type='client',
-                                resource='ec2',
-                                region=region,
-                                endpoint=ec2_url,
-                                **aws_connect_params)
+    ec2_connection = module.client('ec2')

     if vpc_zone_identifier:
         vpc_zone_identifier = ','.join(vpc_zone_identifier)
@@ -1086,6 +1080,8 @@ def create_autoscaling_group(connection):
             ag['LoadBalancerNames'] = load_balancers
         if target_group_arns:
             ag['TargetGroupARNs'] = target_group_arns
+        if max_instance_lifetime:
+            ag['MaxInstanceLifetime'] = max_instance_lifetime

         launch_object = get_launch_object(connection, ec2_connection)
         if 'LaunchConfigurationName' in launch_object:
@@ -1283,6 +1279,9 @@ def create_autoscaling_group(connection):
             ag['AvailabilityZones'] = availability_zones
         if vpc_zone_identifier:
             ag['VPCZoneIdentifier'] = vpc_zone_identifier
+        if max_instance_lifetime is not None:
+            ag['MaxInstanceLifetime'] = max_instance_lifetime
+
         try:
             update_asg(connection, **ag)
@@ -1375,7 +1374,6 @@ def get_chunks(l, n):

 def update_size(connection, group, max_size, min_size, dc):
     module.debug("setting ASG sizes")
     module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size))
-
     updated_group = dict()
@@ -1389,6 +1387,7 @@ def update_size(connection, group, max_size, min_size, dc):
 def replace(connection):
     batch_size = module.params.get('replace_batch_size')
     wait_timeout = module.params.get('wait_timeout')
+    wait_for_instances = module.params.get('wait_for_instances')
     group_name = module.params.get('name')
     max_size = module.params.get('max_size')
     min_size = module.params.get('min_size')
@@ -1399,7 +1398,7 @@ def replace(connection):
         lc_check = module.params.get('lc_check')
     else:
         lc_check = False
-    # Mirror above behaviour for Launch Templates
+    # Mirror above behavior for Launch Templates
     launch_template = module.params.get('launch_template')
     if launch_template:
         lt_check = module.params.get('lt_check')
@@ -1412,7 +1411,9 @@ def replace(connection):
     if desired_capacity is None:
         desired_capacity = as_group['DesiredCapacity']

-    wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
+    if wait_for_instances:
+        wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
+
     props = get_properties(as_group)
     instances = props['instances']
     if replace_all_instances:
@@ -1437,7 +1438,7 @@ def replace(connection):
         as_group = describe_autoscaling_groups(connection, group_name)[0]
         props = get_properties(as_group)
         changed = True
-        return(changed, props)
+        return changed, props

     # we don't want to spin up extra instances if not necessary
     if num_new_inst_needed < batch_size:
@@ -1446,7 +1447,7 @@ def replace(connection):
         if not old_instances:
             changed = False
-            return(changed, props)
+            return changed, props

     # check if min_size/max_size/desired capacity have been specified and if not use ASG values
     if min_size is None:
@@ -1459,36 +1460,42 @@ def replace(connection):
     as_group = describe_autoscaling_groups(connection, group_name)[0]
     update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
-    wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances')
-    wait_for_elb(connection, group_name)
-    wait_for_target_group(connection, group_name)
+
+    if wait_for_instances:
+        wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances')
+        wait_for_elb(connection, group_name)
+        wait_for_target_group(connection, group_name)
+
     as_group = describe_autoscaling_groups(connection, group_name)[0]
     props = get_properties(as_group)
     instances = props['instances']
     if replace_instances:
         instances = replace_instances
+
     module.debug("beginning main loop")
     for i in get_chunks(instances, batch_size):
         # break out of this loop if we have enough new instances
         break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False)
-        wait_for_term_inst(connection, term_instances)
-        wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances')
-        wait_for_elb(connection, group_name)
-        wait_for_target_group(connection, group_name)
-        as_group = describe_autoscaling_groups(connection, group_name)[0]
+
+        if wait_for_instances:
+            wait_for_term_inst(connection, term_instances)
+            wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances')
+            wait_for_elb(connection, group_name)
+            wait_for_target_group(connection, group_name)
+
         if break_early:
             module.debug("breaking loop")
             break
+
     update_size(connection, as_group, max_size, min_size, desired_capacity)
     as_group = describe_autoscaling_groups(connection, group_name)[0]
     asg_properties = get_properties(as_group)
     module.debug("Rolling update complete.")
     changed = True
-    return(changed, asg_properties)
+    return changed, asg_properties


 def get_instances_by_launch_config(props, lc_check, initial_instances):
     new_instances = []
     old_instances = []
     # old instances are those that have the old launch config
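To illustrate the bugfix above (not part of the diff): with wait_for_instances disabled, replace() now skips the viability, ELB and target-group waiters entirely. A sketch, with illustrative group and launch configuration names:

- name: roll all instances onto a new launch configuration without waiting
  ec2_asg:
    name: example-asg
    launch_config_name: example-lc-2
    replace_all_instances: yes
    wait_for_instances: no
    wait_timeout: 600
  register: rolled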
@@ -1509,6 +1516,7 @@ def get_instances_by_launch_config(props, lc_check, initial_instances):
                 new_instances.append(i)
             else:
                 old_instances.append(i)
+
     module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
     module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
@@ -1535,6 +1543,7 @@ def get_instances_by_launch_template(props, lt_check, initial_instances):
                 new_instances.append(i)
             else:
                 old_instances.append(i)
+
     module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
     module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
@@ -1546,23 +1555,25 @@ def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances):
     instances = (inst_id for inst_id in replace_instances if inst_id in props['instances'])
     # check to make sure instances given are actually in the given ASG
     # and they have a non-current launch config
-    if module.params.get('launch_config_name'):
+    if 'launch_config_name' in module.params:
         if lc_check:
             for i in instances:
-                if 'launch_template' in props['instance_facts'][i]:
-                    instances_to_terminate.append(i)
-                elif props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
+                if (
+                    'launch_template' in props['instance_facts'][i]
+                    or props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']
+                ):
                     instances_to_terminate.append(i)
         else:
             for i in instances:
                 if i in initial_instances:
                     instances_to_terminate.append(i)
-    elif module.params.get('launch_template'):
+    elif 'launch_template' in module.params:
         if lt_check:
             for i in instances:
-                if 'launch_config_name' in props['instance_facts'][i]:
-                    instances_to_terminate.append(i)
-                elif props['instance_facts'][i]['launch_template'] != props['launch_template']:
+                if (
+                    'launch_config_name' in props['instance_facts'][i]
+                    or props['instance_facts'][i]['launch_template'] != props['launch_template']
+                ):
                     instances_to_terminate.append(i)
         else:
             for i in instances:
@@ -1666,7 +1677,6 @@ def wait_for_term_inst(connection, term_instances):

 def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop):
-
     # make sure we have the latest stats after that last loop.
     as_group = describe_autoscaling_groups(connection, group_name)[0]
     props = get_properties(as_group)
@@ -1692,56 +1702,66 @@ def asg_exists(connection):

 def main():
-    argument_spec = ec2_argument_spec()
-    argument_spec.update(
-        dict(
-            name=dict(required=True, type='str'),
-            load_balancers=dict(type='list'),
-            target_group_arns=dict(type='list'),
-            availability_zones=dict(type='list'),
-            launch_config_name=dict(type='str'),
-            launch_template=dict(type='dict',
-                                 default=None,
-                                 options=dict(
-                                     version=dict(type='str'),
-                                     launch_template_name=dict(type='str'),
-                                     launch_template_id=dict(type='str'),
-                                 ),
-                                 ),
-            mixed_instances_policy=dict(type='dict',
-                                        default=None,
-                                        options=dict(
-                                            instance_types=dict(type='list', elements='str'),
-                                        )),
-            min_size=dict(type='int'),
-            max_size=dict(type='int'),
-            placement_group=dict(type='str'),
-            desired_capacity=dict(type='int'),
-            vpc_zone_identifier=dict(type='list'),
-            replace_batch_size=dict(type='int', default=1),
-            replace_all_instances=dict(type='bool', default=False),
-            replace_instances=dict(type='list', default=[]),
-            lc_check=dict(type='bool', default=True),
-            lt_check=dict(type='bool', default=True),
-            wait_timeout=dict(type='int', default=300),
-            state=dict(default='present', choices=['present', 'absent']),
-            tags=dict(type='list', default=[]),
-            health_check_period=dict(type='int', default=300),
-            health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
-            default_cooldown=dict(type='int', default=300),
-            wait_for_instances=dict(type='bool', default=True),
-            termination_policies=dict(type='list', default='Default'),
-            notification_topic=dict(type='str', default=None),
-            notification_types=dict(type='list', default=[
+    argument_spec = dict(
+        name=dict(required=True, type='str'),
+        load_balancers=dict(type='list'),
+        target_group_arns=dict(type='list'),
+        availability_zones=dict(type='list'),
+        launch_config_name=dict(type='str'),
+        launch_template=dict(
+            type='dict',
+            default=None,
+            options=dict(
+                version=dict(type='str'),
+                launch_template_name=dict(type='str'),
+                launch_template_id=dict(type='str'),
+            )
+        ),
+        min_size=dict(type='int'),
+        max_size=dict(type='int'),
+        max_instance_lifetime=dict(type='int'),
+        mixed_instances_policy=dict(
+            type='dict',
+            default=None,
+            options=dict(
+                instance_types=dict(
+                    type='list',
+                    elements='str'
+                ),
+            )
+        ),
+        placement_group=dict(type='str'),
+        desired_capacity=dict(type='int'),
+        vpc_zone_identifier=dict(type='list'),
+        replace_batch_size=dict(type='int', default=1),
+        replace_all_instances=dict(type='bool', default=False),
+        replace_instances=dict(type='list', default=[]),
+        lc_check=dict(type='bool', default=True),
+        lt_check=dict(type='bool', default=True),
+        wait_timeout=dict(type='int', default=300),
+        state=dict(default='present', choices=['present', 'absent']),
+        tags=dict(type='list', default=[]),
+        health_check_period=dict(type='int', default=300),
+        health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
+        default_cooldown=dict(type='int', default=300),
+        wait_for_instances=dict(type='bool', default=True),
+        termination_policies=dict(type='list', default='Default'),
+        notification_topic=dict(type='str', default=None),
+        notification_types=dict(
+            type='list',
+            default=[
                 'autoscaling:EC2_INSTANCE_LAUNCH',
                 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
                 'autoscaling:EC2_INSTANCE_TERMINATE',
                 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
-            ]),
-            suspend_processes=dict(type='list', default=[]),
-            metrics_collection=dict(type='bool', default=False),
-            metrics_granularity=dict(type='str', default='1Minute'),
-            metrics_list=dict(type='list', default=[
+            ]
+        ),
+        suspend_processes=dict(type='list', default=[]),
+        metrics_collection=dict(type='bool', default=False),
+        metrics_granularity=dict(type='str', default='1Minute'),
+        metrics_list=dict(
+            type='list',
+            default=[
                 'GroupMinSize',
                 'GroupMaxSize',
                 'GroupDesiredCapacity',
@@ -1750,8 +1770,8 @@ def main():
                 'GroupStandbyInstances',
                 'GroupTerminatingInstances',
                 'GroupTotalInstances'
-            ])
-        ),
+            ]
+        )
     )

     global module
@@ -1759,26 +1779,31 @@ def main():
         argument_spec=argument_spec,
         mutually_exclusive=[
             ['replace_all_instances', 'replace_instances'],
-            ['launch_config_name', 'launch_template']]
+            ['launch_config_name', 'launch_template']
+        ]
     )

-    if not HAS_BOTO3:
-        module.fail_json(msg='boto3 required for this module')
+    if (
+        module.params.get('max_instance_lifetime') is not None
+        and not module.botocore_at_least('1.13.21')
+    ):
+        module.fail_json(
+            msg='Botocore needs to be version 1.13.21 or higher to use max_instance_lifetime.'
+        )

-    if module.params.get('mixed_instance_type') and not module.botocore_at_least('1.12.45'):
-        module.fail_json(msg="mixed_instance_type is only supported with botocore >= 1.12.45")
+    if (
+        module.params.get('mixed_instances_policy') is not None
+        and not module.botocore_at_least('1.12.45')
+    ):
+        module.fail_json(
+            msg='Botocore needs to be version 1.12.45 or higher to use mixed_instances_policy.'
+        )

     state = module.params.get('state')
     replace_instances = module.params.get('replace_instances')
     replace_all_instances = module.params.get('replace_all_instances')

-    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
-    connection = boto3_conn(module,
-                            conn_type='client',
-                            resource='autoscaling',
-                            region=region,
-                            endpoint=ec2_url,
-                            **aws_connect_params)
+    connection = module.client('autoscaling')

     changed = create_changed = replace_changed = False
     exists = asg_exists(connection)
@@ -1789,10 +1814,16 @@ def main():
         module.exit_json(changed=changed)

     # Only replace instances if asg existed at start of call
-    if exists and (replace_all_instances or replace_instances) and (module.params.get('launch_config_name') or module.params.get('launch_template')):
+    if (
+        exists
+        and (replace_all_instances or replace_instances)
+        and (module.params.get('launch_config_name') or module.params.get('launch_template'))
+    ):
         replace_changed, asg_properties = replace(connection)
+
     if create_changed or replace_changed:
         changed = True
+
     module.exit_json(changed=changed, **asg_properties)

@@ -95,7 +95,7 @@
     #   was created
     - set_fact:
         load_balancer_name: "{{ item }}-lb"
-      with_items: "{{ resource_prefix | regex_findall('.{8}$') }}"
+      loop: "{{ resource_prefix | regex_findall('.{8}$') }}"

     # Set up the testing dependencies: VPC, subnet, security group, and two launch configurations
@@ -164,7 +164,7 @@
           - "service httpd start"
         security_groups: "{{ sg.group_id }}"
         instance_type: t3.micro
-      with_items:
+      loop:
        - "{{ resource_prefix }}-lc"
        - "{{ resource_prefix }}-lc-2"
@@ -314,6 +314,10 @@
        name: "{{ resource_prefix }}-asg"
        state: absent
        wait_timeout: 800
+      register: output
+      retries: 3
+      until: output is succeeded
+      delay: 10
       async: 400

   # ============================================================
@@ -411,6 +415,43 @@

   # ============================================================

+  # Test max_instance_lifetime option
+  - name: enable asg max_instance_lifetime
+    ec2_asg:
+      name: "{{ resource_prefix }}-asg"
+      max_instance_lifetime: 604801
+    register: output
+
+  - name: ensure max_instance_lifetime is set
+    assert:
+      that:
+        - output.max_instance_lifetime == 604801
+
+  - name: run without max_instance_lifetime
+    ec2_asg:
+      name: "{{ resource_prefix }}-asg"
+      launch_config_name: "{{ resource_prefix }}-lc"
+
+  - name: ensure max_instance_lifetime not affected by defaults
+    assert:
+      that:
+        - output.max_instance_lifetime == 604801
+
+  - name: disable asg max_instance_lifetime
+    ec2_asg:
+      name: "{{ resource_prefix }}-asg"
+      launch_config_name: "{{ resource_prefix }}-lc"
+      max_instance_lifetime: 0
+    register: output
+
+  - name: ensure max_instance_lifetime is not set
+    assert:
+      that:
+        - not output.max_instance_lifetime
+
+  # ============================================================
   # # perform rolling replace with different launch configuration
   - name: perform rolling update to new AMI
@@ -434,7 +475,7 @@
   - assert:
       that:
         - "item.value.launch_config_name == '{{ resource_prefix }}-lc-2'"
-    with_dict: "{{ output.instance_facts }}"
+    loop: "{{ output.instance_facts | dict2items }}"

   # assert they are all healthy and that the rolling update resulted in the appropriate number of instances
   - assert:
@@ -466,7 +507,7 @@
   - assert:
       that:
        - "item.value.launch_config_name == '{{ resource_prefix }}-lc'"
-    with_dict: "{{ output.instance_facts }}"
+    loop: "{{ output.instance_facts | dict2items }}"

   # assert they are all healthy and that the rolling update resulted in the appropriate number of instances
   # there should be the same number of instances as there were before the rolling update was performed
@@ -502,22 +543,21 @@
       poll: 0
     register: asg_job

-  - name: get ec2_asg facts for 3 minutes
+  - name: get ec2_asg info for 3 minutes
    ec2_asg_info:
       name: "{{ resource_prefix }}-asg"
     register: output
     loop_control:
       pause: 15
-    with_sequence: count=12
-
-  - set_fact:
-      inst_id_json_query: 'results[*].results[*].instances[*].instance_id'
+    loop: "{{ range(12) | list }}"

   # Since we started with 3 servers and replace all of them.
   # We should see 6 servers total.
   - assert:
       that:
-        - "lookup('flattened',output|json_query(inst_id_json_query)).split(',')|unique|length == 6"
+        - output | json_query(inst_id_json_query) | unique | length == 6
+    vars:
+      inst_id_json_query: results[].results[].instances[].instance_id

   - name: Ensure ec2_asg task completes
     async_status: jid="{{ asg_job.ansible_job_id }}"
@@ -568,16 +608,15 @@
     register: output
     loop_control:
       pause: 15
-    with_sequence: count=12
-
-  - set_fact:
-      inst_id_json_query: 'results[*].results[*].instances[*].instance_id'
+    loop: "{{ range(12) | list }}"

   # Get all instance_ids we saw and assert we saw number expected
   # Should only see 3 (don't replace instances we just created)
   - assert:
       that:
-        - "lookup('flattened',output|json_query(inst_id_json_query)).split(',')|unique|length == 3"
+        - output | json_query(inst_id_json_query) | unique | length == 3
+    vars:
+      inst_id_json_query: results[].results[].instances[].instance_id

   - name: Ensure ec2_asg task completes
     async_status: jid="{{ asg_job.ansible_job_id }}"
@@ -673,7 +712,7 @@
     until: removed is not failed
     ignore_errors: yes
     retries: 10
-    with_items:
+    loop:
      - "{{ resource_prefix }}-lc"
      - "{{ resource_prefix }}-lc-2"
