diff --git a/test/integration/cleanup_ec2.py b/test/integration/cleanup_ec2.py
index e4241b0d7dc..1935f0bdc18 100644
--- a/test/integration/cleanup_ec2.py
+++ b/test/integration/cleanup_ec2.py
@@ -12,6 +12,7 @@ import optparse
 import yaml
 import os.path
 import boto.ec2.elb
+import time
 
 def delete_aws_resources(get_func, attr, opts):
     for item in get_func():
@@ -19,6 +20,37 @@ def delete_aws_resources(get_func, attr, opts):
         if re.search(opts.match_re, val):
             prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes)
 
+def delete_autoscaling_group(get_func, attr, opts):
+    assumeyes = opts.assumeyes
+    group_name = None
+    for item in get_func():
+        group_name = getattr(item, attr)
+        if re.search(opts.match_re, group_name):
+            if not opts.assumeyes:
+                assumeyes = raw_input("Delete matching %s? [y/n]: " % (item,)).lower() == 'y'
+            break
+    if assumeyes and group_name:
+        groups = asg.get_all_groups(names=[group_name])
+        if groups:
+            group = groups[0]
+            group.max_size = 0
+            group.min_size = 0
+            group.desired_capacity = 0
+            group.update()
+            instances = True
+            while instances:
+                tmp_groups = asg.get_all_groups(names=[group_name])
+                if tmp_groups:
+                    tmp_group = tmp_groups[0]
+                    if not tmp_group.instances:
+                        instances = False
+                time.sleep(10)
+
+            group.delete()
+            while len(asg.get_all_groups(names=[group_name])):
+                time.sleep(5)
+            print ("Terminated ASG: %s" % group_name)
+
 def delete_aws_eips(get_func, attr, opts):
 
     # the file might not be there if the integration test wasn't run
@@ -128,7 +160,7 @@ if __name__ == '__main__':
         delete_aws_resources(aws.get_all_security_groups, 'name', opts)
 
         # Delete matching ASGs
-        delete_aws_resources(asg.get_all_groups, 'name', opts)
+        delete_autoscaling_group(asg.get_all_groups, 'name', opts)
 
         # Delete matching launch configs
         delete_aws_resources(asg.get_all_launch_configurations, 'name', opts)
diff --git a/test/integration/roles/test_ec2_asg/tasks/main.yml b/test/integration/roles/test_ec2_asg/tasks/main.yml
index 6c670375d94..091eb2ab2b3 100644
--- a/test/integration/roles/test_ec2_asg/tasks/main.yml
+++ b/test/integration/roles/test_ec2_asg/tasks/main.yml
@@ -1,31 +1,69 @@
 ---
 # tasks file for test_ec2_asg
+# we are using a custom built AMI that runs an apache server to verify
+# ELB health checks and perform rolling ASG updates
+# this will only work on us-east-1
+
 # ============================================================
 # create and kill an ASG
-- name: lookup ami id
-  ec2_ami_search: distro=ubuntu region={{ ec2_region }} release=trusty
-  register: ubuntu_image
 - name: ensure launch config exists
   ec2_lc:
     name: "{{ resource_prefix }}-lc"
     ec2_access_key: "{{ ec2_access_key }}"
     ec2_secret_key: "{{ ec2_secret_key }}"
     region: "{{ ec2_region }}"
-    image_id: "{{ ubuntu_image.ami }}"
-    instance_type: t1.micro
-- name: launch asg
+    image_id: ami-964a0efe
+    instance_type: t2.micro
+
+- name: launch asg and wait for instances to be deemed healthy (no ELB)
+  ec2_asg:
+    name: "{{ resource_prefix }}-asg"
+    ec2_access_key: "{{ ec2_access_key }}"
+    ec2_secret_key: "{{ ec2_secret_key }}"
+    launch_config_name: "{{ resource_prefix }}-lc"
+    desired_capacity: 1
+    min_size: 1
+    max_size: 1
+    region: "{{ ec2_region }}"
+    state: present
+    wait_for_instances: yes
+  register: output
+
+- assert:
+    that:
+      - "output.viable_instances == 1"
+
+# - name: pause for a bit to make sure that the group can't be trivially deleted
+#   pause: seconds=30
+- name: kill asg
+  ec2_asg:
+    name: "{{ resource_prefix }}-asg"
+    ec2_access_key: "{{ ec2_access_key }}"
+    ec2_secret_key: "{{ ec2_secret_key }}"
+    region: "{{ ec2_region }}"
+    state: absent
+  async: 300
+
+
+- name: launch asg and do not wait for instances to be deemed healthy (no ELB)
   ec2_asg:
     name: "{{ resource_prefix }}-asg"
     ec2_access_key: "{{ ec2_access_key }}"
     ec2_secret_key: "{{ ec2_secret_key }}"
     launch_config_name: "{{ resource_prefix }}-lc"
+    desired_capacity: 1
     min_size: 1
     max_size: 1
     region: "{{ ec2_region }}"
+    wait_for_instances: no
     state: present
-- name: pause for a bit to make sure that the group can't be trivially deleted
-  pause: seconds=30
+  register: output
+
+- assert:
+    that:
+      - "output.viable_instances == 0"
+
 - name: kill asg
   ec2_asg:
     name: "{{ resource_prefix }}-asg"
@@ -34,3 +72,139 @@
     region: "{{ ec2_region }}"
     state: absent
   async: 300
+
+- name: launch load balancer
+  ec2_elb_lb:
+    name: "{{ resource_prefix }}-lb"
+    region: "{{ ec2_region }}"
+    state: present
+    ec2_access_key: "{{ ec2_access_key }}"
+    ec2_secret_key: "{{ ec2_secret_key }}"
+    zones:
+      - "{{ ec2_region }}b"
+      - "{{ ec2_region }}c"
+    connection_draining_timeout: 60
+    listeners:
+      - protocol: http
+        load_balancer_port: 80
+        instance_port: 80
+    health_check:
+        ping_protocol: http
+        ping_port: 80
+        ping_path: "/"
+        response_timeout: 5
+        interval: 30
+        unhealthy_threshold: 3
+        healthy_threshold: 3
+  register: load_balancer
+
+
+- name: launch asg and wait for instances to be deemed healthy (ELB)
+  ec2_asg:
+    name: "{{ resource_prefix }}-asg"
+    availability_zones:
+      - "{{ ec2_region }}b"
+      - "{{ ec2_region }}c"
+    ec2_access_key: "{{ ec2_access_key }}"
+    ec2_secret_key: "{{ ec2_secret_key }}"
+    launch_config_name: "{{ resource_prefix }}-lc"
+    health_check_type: ELB
+    desired_capacity: 1
+    min_size: 1
+    max_size: 1
+    health_check_period: 120
+    load_balancers: "{{ resource_prefix }}-lb"
+    region: "{{ ec2_region }}"
+    wait_for_instances: yes
+    wait_timeout: 600
+    state: present
+  register: output
+
+- assert:
+    that:
+      - "output.viable_instances == 1"
+
+
+# grow scaling group to 3
+
+- name: add 2 more instances wait for instances to be deemed healthy (ELB)
+  ec2_asg:
+    name: "{{ resource_prefix }}-asg"
+    availability_zones:
+      - "{{ ec2_region }}b"
+      - "{{ ec2_region }}c"
+    ec2_access_key: "{{ ec2_access_key }}"
+    ec2_secret_key: "{{ ec2_secret_key }}"
+    launch_config_name: "{{ resource_prefix }}-lc"
+    health_check_type: ELB
+    desired_capacity: 3
+    min_size: 3
+    max_size: 5
+    health_check_period: 120
+    load_balancers: "{{ resource_prefix }}-lb"
+    region: "{{ ec2_region }}"
+    wait_for_instances: yes
+    wait_timeout: 600
+    state: present
+  register: output
+
+- assert:
+    that:
+      - "output.viable_instances == 3"
+
+# # create new launch config with alternate AMI
+
+- name: ensure launch config exists
+  ec2_lc:
+    name: "{{ resource_prefix }}-lc-2"
+    ec2_access_key: "{{ ec2_access_key }}"
+    ec2_secret_key: "{{ ec2_secret_key }}"
+    region: "{{ ec2_region }}"
+    image_id: ami-2a4a0e42
+    instance_type: t2.micro
+
+
+# # perform rolling replace
+
+- name: perform rolling update to new AMI
+  ec2_asg:
+    name: "{{ resource_prefix }}-asg"
+    availability_zones:
+      - "{{ ec2_region }}b"
+      - "{{ ec2_region }}c"
+    ec2_access_key: "{{ ec2_access_key }}"
+    ec2_secret_key: "{{ ec2_secret_key }}"
+    launch_config_name: "{{ resource_prefix }}-lc-2"
+    health_check_type: ELB
+    desired_capacity: 3
+    min_size: 3
+    max_size: 5
+    health_check_period: 120
+    load_balancers: "{{ resource_prefix }}-lb"
+    region: "{{ ec2_region }}"
+    wait_for_instances: yes
+    replace_all_instances: yes
+    wait_timeout: 600
+    state: present
+  register: output
+
+# ensure that all instances have new launch config
+- assert:
+    that:
+      - "item.value.launch_config_name == '{{ resource_prefix }}-lc-2'"
+  with_dict: output.instance_facts
+
+# assert they are all healthy
+- assert:
+    that:
+      - "output.viable_instances >= 3"
+
+
+- name: kill asg
+  ec2_asg:
+    name: "{{ resource_prefix }}-asg"
+    ec2_access_key: "{{ ec2_access_key }}"
+    ec2_secret_key: "{{ ec2_secret_key }}"
+    region: "{{ ec2_region }}"
+    state: absent
+  async: 300
\ No newline at end of file