From 6b7ea5078f3d12a2744bd0cfa4d2fd53dd44b6de Mon Sep 17 00:00:00 2001 From: yaakov kuperman Date: Thu, 30 Aug 2018 10:47:28 -0400 Subject: [PATCH] [aws] New module: elb_target_facts (#43565) * initial source of the elb_target_facts module, which gathers all registered ELBv2 target groups for a given instance id * updating * initial dump of tests * updating docs, adding AWSRetry decorators * updating tests * More brush up, some refactoring * updating for sanity tests * removing extra space * updating elb_target_facts to use a new name for the return value, not return a fact, and use instance variables for the AWS connections. updating tests to go along with that * updating classes to be 'new-style' classes --- .../modules/cloud/amazon/elb_target_facts.py | 434 ++++++++++++++ .../targets/elb_target_facts/aliases | 2 + .../elb_target_facts/playbooks/full_test.yml | 5 + .../roles/elb_target_facts/defaults/main.yml | 7 + .../roles/elb_target_facts/tasks/main.yml | 532 ++++++++++++++++++ .../targets/elb_target_facts/runme.sh | 6 + 6 files changed, 986 insertions(+) create mode 100644 lib/ansible/modules/cloud/amazon/elb_target_facts.py create mode 100644 test/integration/targets/elb_target_facts/aliases create mode 100644 test/integration/targets/elb_target_facts/playbooks/full_test.yml create mode 100644 test/integration/targets/elb_target_facts/playbooks/roles/elb_target_facts/defaults/main.yml create mode 100644 test/integration/targets/elb_target_facts/playbooks/roles/elb_target_facts/tasks/main.yml create mode 100755 test/integration/targets/elb_target_facts/runme.sh diff --git a/lib/ansible/modules/cloud/amazon/elb_target_facts.py b/lib/ansible/modules/cloud/amazon/elb_target_facts.py new file mode 100644 index 00000000000..ad162fee31e --- /dev/null +++ b/lib/ansible/modules/cloud/amazon/elb_target_facts.py @@ -0,0 +1,434 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Yaakov Kuperman +# GNU General Public License v3.0+ # (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +ANSIBLE_METADATA = {"metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community"} + + +DOCUMENTATION = """ +--- +module: elb_target_facts +short_description: Gathers which target groups a target is associated with. +description: + - This module will search through every target group in a region to find + which ones have registered a given instance ID or IP. + +version_added: "2.7" +author: "Yaakov Kuperman (@yaakov-github)" +options: + instance_id: + description: + - What instance ID to get facts for. + type: str + required: true + get_unused_target_groups: + description: + - Whether or not to get target groups not used by any load balancers. + type: bool + default: true + +requirements: + - boto3 + - botocore +extends_documentation_fragment: + - aws + - ec2 +""" + +EXAMPLES = """ +# practical use case - dynamically deregistering and reregistering nodes + + - name: Get EC2 Metadata + action: ec2_metadata_facts + + - name: Get initial list of target groups + delegate_to: localhost + elb_target_facts: + instance_id: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + register: target_facts + + - name: save fact for later + set_fact: + original_tgs: "{{ target_facts.instance_target_groups }}" + + - name: Deregister instance from all target groups + delegate_to: localhost + elb_target: + target_group_arn: "{{ item.0.target_group_arn }}" + target_port: "{{ item.1.target_port }}" + target_az: "{{ item.1.target_az }}" + target_id: "{{ item.1.target_id }}" + state: absent + target_status: "draining" + region: "{{ ansible_ec2_placement_region }}" + with_subelements: + - "{{ original_tgs }}" + - "targets" + + # This avoids having to wait for 'elb_target' to serially deregister each + # target group. An alternative would be to run all of the 'elb_target' + # tasks async and wait for them to finish. 
+ + - name: wait for all targets to deregister simultaneously + delegate_to: localhost + elb_target_facts: + get_unused_target_groups: false + instance_id: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + register: target_facts + until: (target_facts.instance_target_groups | length) == 0 + retries: 60 + delay: 10 + + - name: reregister in elbv2s + elb_target: + region: "{{ ansible_ec2_placement_region }}" + target_group_arn: "{{ item.0.target_group_arn }}" + target_port: "{{ item.1.target_port }}" + target_az: "{{ item.1.target_az }}" + target_id: "{{ item.1.target_id }}" + state: present + target_status: "initial" + with_subelements: + - "{{ original_tgs }}" + - "targets" + + # wait until all groups associated with this instance are 'healthy' or + # 'unused' + - name: wait for registration + elb_target_facts: + get_unused_target_groups: false + instance_id: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + register: target_facts + until: (target_facts.instance_target_groups | + map(attribute='targets') | + flatten | + map(attribute='target_health') | + rejectattr('state', 'equalto', 'healthy') | + rejectattr('state', 'equalto', 'unused') | + list | + length) == 0 + retries: 61 + delay: 10 + +# using the target groups to generate AWS CLI commands to reregister the +# instance - useful in case the playbook fails mid-run and manual +# rollback is required + - name: "reregistration commands: ELBv2s" + debug: + msg: > + aws --region {{ansible_ec2_placement_region}} elbv2 + register-targets --target-group-arn {{item.target_group_arn}} + --targets{%for target in item.targets%} + Id={{target.target_id}}, + Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}} + {%endif%} + {%endfor%} + with_items: "{{target_facts.instance_target_groups}}" + +""" + +RETURN = """ +instance_target_groups: + description: a list of target groups to which the instance is registered to + returned: 
always
+    type: complex
+    contains:
+        target_group_arn:
+            description: The ARN of the target group
+            type: string
+            returned: always
+            sample:
+                - "arn:aws:elasticloadbalancing:eu-west-1:111111111111:targetgroup/target-group/deadbeefdeadbeef"
+        target_group_type:
+            description: Which target type is used for this group
+            returned: always
+            type: string
+            sample:
+                - ip
+                - instance
+        targets:
+            description: A list of targets that point to this instance ID
+            returned: always
+            type: complex
+            contains:
+                target_id:
+                    description: the target ID referring to this instance
+                    type: str
+                    returned: always
+                    sample:
+                        - i-deadbeef
+                        - 1.2.3.4
+                target_port:
+                    description: which port this target is listening on
+                    type: str
+                    returned: always
+                    sample:
+                        - 80
+                target_az:
+                    description: which availability zone is explicitly
+                                 associated with this target
+                    type: str
+                    returned: when an AZ is associated with this instance
+                    sample:
+                        - us-west-2a
+                target_health:
+                    description: the target health description
+                                 (see U(https://boto3.readthedocs.io/en/latest/
+                                 reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_health))
+                                 for all possible values
+                    returned: always
+                    type: complex
+                    contains:
+                        description:
+                            description: description of target health
+                            returned: if I(state!=present)
+                            sample:
+                                - "Target deregistration is in progress"
+                        reason:
+                            description: reason code for target health
+                            returned: if I(state!=healthy)
+                            sample:
+                                - "Target.Deregistration in progress"
+                        state:
+                            description: health state
+                            returned: always
+                            sample:
+                                - "healthy"
+                                - "draining"
+                                - "initial"
+                                - "unhealthy"
+                                - "unused"
+                                - "unavailable"
+"""
+
+__metaclass__ = type
+
+try:
+    from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+    # we can handle the lack of boto3 based on the ec2 module
+    pass
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import (HAS_BOTO3, camel_dict_to_snake_dict,
+                                      AWSRetry)
+
+
+class Target(object): + """Models a target in a target group""" + def __init__(self, target_id, port, az, raw_target_health): + self.target_port = port + self.target_id = target_id + self.target_az = az + self.target_health = self.convert_target_health(raw_target_health) + + def convert_target_health(self, raw_target_health): + return camel_dict_to_snake_dict(raw_target_health) + + +class TargetGroup(object): + """Models an elbv2 target group""" + + def __init__(self, **kwargs): + self.target_group_type = kwargs["target_group_type"] + self.target_group_arn = kwargs["target_group_arn"] + # the relevant targets associated with this group + self.targets = [] + + def add_target(self, target_id, target_port, target_az, raw_target_health): + self.targets.append(Target(target_id, + target_port, + target_az, + raw_target_health)) + + def to_dict(self): + object_dict = vars(self) + object_dict["targets"] = [vars(each) for each in self.get_targets()] + return object_dict + + def get_targets(self): + return list(self.targets) + + +class TargetFactsGatherer(object): + + def __init__(self, module, instance_id, get_unused_target_groups): + self.module = module + try: + self.ec2 = self.module.client( + "ec2", + retry_decorator=AWSRetry.jittered_backoff(retries=10) + ) + except (ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, + msg="Couldn't connect to ec2" + ) + + try: + self.elbv2 = self.module.client( + "elbv2", + retry_decorator=AWSRetry.jittered_backoff(retries=10) + ) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, + msg="Could not connect to elbv2" + ) + + self.instance_id = instance_id + self.get_unused_target_groups = get_unused_target_groups + self.tgs = self._get_target_groups() + + def _get_instance_ips(self): + """Fetch all IPs associated with this instance so that we can determine + whether or not an instance is in an IP-based target group""" + try: + # get ahold of the instance in the API + reservations = 
self.ec2.describe_instances( + InstanceIds=[self.instance_id], + aws_retry=True + )["Reservations"] + except (BotoCoreError, ClientError) as e: + # typically this will happen if the instance doesn't exist + self.module.fail_json_aws(e, + msg="Could not get instance info" + + " for instance '%s'" % + (self.instance_id) + ) + + if len(reservations) < 1: + self.module.fail_json( + msg="Instance ID %s could not be found" % self.instance_id + ) + + instance = reservations[0]["Instances"][0] + + # IPs are represented in a few places in the API, this should + # account for all of them + ips = set() + ips.add(instance["PrivateIpAddress"]) + for nic in instance["NetworkInterfaces"]: + ips.add(nic["PrivateIpAddress"]) + for ip in nic["PrivateIpAddresses"]: + ips.add(ip["PrivateIpAddress"]) + + return list(ips) + + def _get_target_group_objects(self): + """helper function to build a list of TargetGroup objects based on + the AWS API""" + try: + paginator = self.elbv2.get_paginator( + "describe_target_groups" + ) + tg_response = paginator.paginate().build_full_result() + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, + msg="Could not describe target" + + " groups" + ) + + # build list of TargetGroup objects representing every target group in + # the system + target_groups = [] + for each_tg in tg_response["TargetGroups"]: + if not self.get_unused_target_groups and \ + len(each_tg["LoadBalancerArns"]) < 1: + # only collect target groups that actually are connected + # to LBs + continue + + target_groups.append( + TargetGroup(target_group_arn=each_tg["TargetGroupArn"], + target_group_type=each_tg["TargetType"], + ) + ) + return target_groups + + def _get_target_descriptions(self, target_groups): + """Helper function to build a list of all the target descriptions + for this target in a target group""" + # Build a list of all the target groups pointing to this instance + # based on the previous list + tgs = set() + # Loop through all the target groups + 
for tg in target_groups: + try: + # Get the list of targets for that target group + response = self.elbv2.describe_target_health( + TargetGroupArn=tg.target_group_arn, + aws_retry=True + ) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, + msg="Could not describe target " + + "health for target group %s" % + tg.target_group_arn + ) + + for t in response["TargetHealthDescriptions"]: + # If the target group has this instance as a target, add to + # list. This logic also accounts for the possibility of a + # target being in the target group multiple times with + # overridden ports + if t["Target"]["Id"] == self.instance_id or \ + t["Target"]["Id"] in self.instance_ips: + + # The 'AvailabilityZone' parameter is a weird one, see the + # API docs for more. Basically it's only supposed to be + # there under very specific circumstances, so we need + # to account for that + az = t["Target"]["AvailabilityZone"] \ + if "AvailabilityZone" in t["Target"] \ + else None + + tg.add_target(t["Target"]["Id"], + t["Target"]["Port"], + az, + t["TargetHealth"]) + # since tgs is a set, each target group will be added only + # once, even though we call add on each successful match + tgs.add(tg) + return list(tgs) + + def _get_target_groups(self): + # do this first since we need the IPs later on in this function + self.instance_ips = self._get_instance_ips() + + # build list of target groups + target_groups = self._get_target_group_objects() + return self._get_target_descriptions(target_groups) + + +def main(): + argument_spec = dict( + instance_id={"required": True, "type": "str"}, + get_unused_target_groups={"required": False, + "default": True, "type": "bool"} + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + instance_id = module.params["instance_id"] + get_unused_target_groups = module.params["get_unused_target_groups"] + + tg_gatherer = TargetFactsGatherer(module, + instance_id, + get_unused_target_groups + 
) + + instance_target_groups = [each.to_dict() for each in tg_gatherer.tgs] + + module.exit_json(instance_target_groups=instance_target_groups) + + +if __name__ == "__main__": + main() diff --git a/test/integration/targets/elb_target_facts/aliases b/test/integration/targets/elb_target_facts/aliases new file mode 100644 index 00000000000..56927195182 --- /dev/null +++ b/test/integration/targets/elb_target_facts/aliases @@ -0,0 +1,2 @@ +cloud/aws +unsupported diff --git a/test/integration/targets/elb_target_facts/playbooks/full_test.yml b/test/integration/targets/elb_target_facts/playbooks/full_test.yml new file mode 100644 index 00000000000..6364c620c5a --- /dev/null +++ b/test/integration/targets/elb_target_facts/playbooks/full_test.yml @@ -0,0 +1,5 @@ +- hosts: localhost + connection: local + + roles: + - elb_target_facts diff --git a/test/integration/targets/elb_target_facts/playbooks/roles/elb_target_facts/defaults/main.yml b/test/integration/targets/elb_target_facts/playbooks/roles/elb_target_facts/defaults/main.yml new file mode 100644 index 00000000000..75df402a02d --- /dev/null +++ b/test/integration/targets/elb_target_facts/playbooks/roles/elb_target_facts/defaults/main.yml @@ -0,0 +1,7 @@ +--- +ec2_ami_image: + us-east-1: ami-8c1be5f6 + us-east-2: ami-c5062ba0 + +tg_name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-tg" +lb_name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-lb" diff --git a/test/integration/targets/elb_target_facts/playbooks/roles/elb_target_facts/tasks/main.yml b/test/integration/targets/elb_target_facts/playbooks/roles/elb_target_facts/tasks/main.yml new file mode 100644 index 00000000000..6fb307daca7 --- /dev/null +++ b/test/integration/targets/elb_target_facts/playbooks/roles/elb_target_facts/tasks/main.yml @@ -0,0 +1,532 @@ +--- + - name: set up elb_target_facts test prerequisites + + block: + + - name: + debug: msg="********** Setting up elb_target_facts test dependencies **********" + # 
============================================================ + + - name: set up aws connection info + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + region: "{{ aws_region }}" + no_log: yes + + # ============================================================ + + - name: set up testing VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: present + cidr_block: 20.0.0.0/16 + <<: *aws_connection_info + tags: + Name: "{{ resource_prefix }}-vpc" + Description: "Created by ansible-test" + register: vpc + + - name: set up testing internet gateway + ec2_vpc_igw: + vpc_id: "{{ vpc.vpc.id }}" + state: present + <<: *aws_connection_info + register: igw + + - name: set up testing subnet + ec2_vpc_subnet: + state: present + vpc_id: "{{ vpc.vpc.id }}" + cidr: 20.0.0.0/18 + az: "{{ aws_region }}a" + resource_tags: + Name: "{{ resource_prefix }}-subnet" + <<: *aws_connection_info + register: subnet_1 + + - name: set up testing subnet + ec2_vpc_subnet: + state: present + vpc_id: "{{ vpc.vpc.id }}" + cidr: 20.0.64.0/18 + az: "{{ aws_region }}b" + resource_tags: + Name: "{{ resource_prefix }}-subnet" + <<: *aws_connection_info + register: subnet_2 + + - name: create routing rules + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + created: "{{ resource_prefix }}-route" + routes: + - dest: 0.0.0.0/0 + gateway_id: "{{ igw.gateway_id }}" + subnets: + - "{{ subnet_1.subnet.id }}" + - "{{ subnet_2.subnet.id }}" + <<: *aws_connection_info + register: route_table + + - name: create testing security group + ec2_group: + name: "{{ resource_prefix }}-sg" + description: a security group for ansible tests + vpc_id: "{{ vpc.vpc.id }}" + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + <<: *aws_connection_info + register: sg + + - name: set up testing target 
group (type=instance) + register: alb_target_group + elb_target_group: + name: "{{ tg_name }}-inst" + health_check_port: 80 + protocol: http + port: 80 + vpc_id: '{{ vpc.vpc.id }}' + state: present + target_type: instance + # set this to 30 to test polling for changes, instead of having everything go out immediately + deregistration_delay_timeout: 30 + tags: + Description: "Created by {{ resource_prefix }}" + <<: *aws_connection_info + + - name: set up testing target group (type=ip) + register: nlb_target_group + elb_target_group: + name: "{{ tg_name }}-ip" + health_check_port: 80 + protocol: tcp + port: 80 + vpc_id: '{{ vpc.vpc.id }}' + state: present + # set this to 30 to test polling for changes, instead of having everything go out immediately + deregistration_delay_timeout: 30 + target_type: ip + tags: + Description: "Created by {{ resource_prefix }}" + <<: *aws_connection_info + + - name: set up testing target group which will not be associated with any load balancers + register: idle_target_group + elb_target_group: + name: "{{ tg_name }}-idle" + health_check_port: 80 + protocol: tcp + port: 80 + vpc_id: '{{ vpc.vpc.id }}' + state: present + target_type: instance + tags: + Description: "Created by {{ resource_prefix }}" + <<: *aws_connection_info + + - name: set up ec2 instance to use as a target + ec2: + group_id: "{{ sg.group_id }}" + instance_type: t2.micro + image: "{{ ec2_ami_image[aws_region] }}" + vpc_subnet_id: "{{ subnet_2.subnet.id }}" + instance_tags: + Name: "{{ resource_prefix }}-inst" + exact_count: 1 + count_tag: + Name: "{{ resource_prefix }}-inst" + assign_public_ip: true + volumes: [] + wait: true + ebs_optimized: false + user_data: | + #cloud-config + package_upgrade: true + package_update: true + packages: + - httpd + runcmd: + - "service httpd start" + - echo "HELLO ANSIBLE" > /var/www/html/index.html + <<: *aws_connection_info + register: ec2 + + - name: create an application load balancer + elb_application_lb: + name: "{{ lb_name 
}}-alb" + security_groups: + - "{{ sg.group_id }}" + subnets: + - "{{ subnet_1.subnet.id }}" + - "{{ subnet_2.subnet.id }}" + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}-inst" + state: present + <<: *aws_connection_info + + + - name: create a network load balancer + elb_network_lb: + name: "{{ lb_name }}-nlb" + subnets: + - "{{ subnet_1.subnet.id }}" + - "{{ subnet_2.subnet.id }}" + listeners: + - Protocol: TCP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}-ip" + state: present + <<: *aws_connection_info + + - name: register with the ALB + elb_target: + target_group_name: "{{ tg_name }}-inst" + target_id: "{{ ec2.instance_ids[0] }}" + state: present + target_status: "initial" + <<: *aws_connection_info + + - name: register with the NLB IP target group + elb_target: + target_group_name: "{{ tg_name }}-ip" + target_id: "{{ ec2.instances[0].private_ip }}" + state: present + target_status: "initial" + <<: *aws_connection_info + + # ============================================================ + + - debug: msg="********** Running elb_target_facts integration tests **********" + + # ============================================================ + - name: gather facts + elb_target_facts: + instance_id: "{{ ec2.instance_ids[0]}}" + <<: *aws_connection_info + register: target_facts + + - assert: + that: + - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" + - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" + - "{{ idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" + - (target_facts.instance_target_groups | length) == 2 + msg: "target facts showed the target in the right target groups" + + + - name: register with unused target group + elb_target: + target_group_name: "{{ 
tg_name }}-idle" + target_id: "{{ ec2.instance_ids[0]}}" + state: present + target_status: "unused" + <<: *aws_connection_info + + - name: gather facts again, including the idle group + elb_target_facts: + instance_id: "{{ ec2.instance_ids[0]}}" + <<: *aws_connection_info + register: target_facts + + - assert: + that: + - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" + - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" + - "{{ idle_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" + - (target_facts.instance_target_groups | length) == 3 + msg: "target facts reflected the addition of the target to the idle group" + + - name: gather facts again, this time excluding the idle group + elb_target_facts: + instance_id: "{{ ec2.instance_ids[0]}}" + get_unused_target_groups: false + <<: *aws_connection_info + register: target_facts + + - assert: + that: + - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" + - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" + - "{{ idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" + - (target_facts.instance_target_groups | length) == 2 + msg: "target_facts.instance_target_groups did not gather unused target groups when variable was set" + + - name: register twice in the same target group + elb_target: + target_group_name: "{{ tg_name }}-ip" + target_port: 22 + target_id: "{{ ec2.instances[0].private_ip }}" + state: present + target_status: "healthy" + target_status_timeout: 300 + <<: *aws_connection_info + + - name: gather facts + elb_target_facts: + instance_id: "{{ ec2.instance_ids[0] }}" + get_unused_target_groups: false + <<: 
*aws_connection_info + register: target_facts + + - assert: + that: + - alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) + - nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) + - (target_facts.instance_target_groups | length) == 2 + - (target_facts.instance_target_groups | + selectattr('target_group_arn', 'equalto', nlb_target_group.target_group_arn) | + map(attribute='targets') | + flatten | + list | + length) == 2 + msg: "registering a target twice didn't affect the overall target group count, increased target count" + + - set_fact: + original_target_groups: "{{ target_facts.instance_target_groups }}" + + - name: Deregister instance from all target groups + elb_target: + target_group_arn: "{{ item.0.target_group_arn }}" + target_port: "{{ item.1.target_port }}" + target_az: "{{ item.1.target_az }}" + target_id: "{{ item.1.target_id }}" + state: absent + target_status: "draining" + <<: *aws_connection_info + with_subelements: + - "{{ original_target_groups }}" + - "targets" + + - name: wait for all targets to deregister simultaneously + elb_target_facts: + get_unused_target_groups: false + instance_id: "{{ ec2.instance_ids[0] }}" + <<: *aws_connection_info + register: target_facts + until: (target_facts.instance_target_groups | length) == 0 + retries: 60 + delay: 10 + + - name: reregister in elbv2s + elb_target: + target_group_arn: "{{ item.0.target_group_arn }}" + target_port: "{{ item.1.target_port }}" + target_az: "{{ item.1.target_az }}" + target_id: "{{ item.1.target_id }}" + state: present + target_status: "initial" + <<: *aws_connection_info + with_subelements: + - "{{ original_target_groups }}" + - "targets" + + # wait until all groups associated with this instance are 'healthy' or + # 'unused' + - name: wait for registration + elb_target_facts: + get_unused_target_groups: false + instance_id: "{{ ec2.instance_ids[0] }}" + <<: 
*aws_connection_info + register: target_facts + until: > + (target_facts.instance_target_groups | + map(attribute='targets') | + flatten | + map(attribute='target_health') | + rejectattr('state', 'equalto', 'healthy') | + rejectattr('state', 'equalto', 'unused') | + list | + length) == 0 + retries: 61 + delay: 10 + + - assert: + that: + - alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) + - nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) + - (target_facts.instance_target_groups | length) == 2 + - (target_facts.instance_target_groups | + selectattr('target_group_arn', 'equalto', nlb_target_group.target_group_arn) | + map(attribute='targets') | + flatten | + list | + length) == 2 + msg: "reregistration completed successfully" + + always: + + - name: + debug: msg="********** Tearing down elb_target_facts test dependencies **********" + + - name: remove ec2 instance + ec2: + group_id: "{{ sg.group_id }}" + instance_type: t2.micro + image: "{{ ec2_ami_image[aws_region] }}" + vpc_subnet_id: "{{ subnet_2.subnet.id }}" + instance_tags: + Name: "{{ resource_prefix }}-inst" + exact_count: 0 + count_tag: + Name: "{{ resource_prefix }}-inst" + assign_public_ip: true + volumes: [] + wait: true + ebs_optimized: false + <<: *aws_connection_info + ignore_errors: true + + - name: remove application load balancer + elb_application_lb: + name: "{{ lb_name }}-alb" + security_groups: + - "{{ sg.group_id }}" + subnets: + - "{{ subnet_1.subnet.id }}" + - "{{ subnet_2.subnet.id }}" + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}-inst" + state: absent + wait: true + wait_timeout: 200 + <<: *aws_connection_info + register: removed + retries: 10 + until: removed is not failed + ignore_errors: true + + - name: remove NLB + ignore_errors: true + elb_network_lb: + name: "{{ lb_name }}-nlb" + state: absent + 
<<: *aws_connection_info + + - name: remove testing target groups + elb_target_group: + name: "{{ item }}" + health_check_port: 80 + protocol: http + port: 80 + vpc_id: '{{ vpc.vpc.id }}' + state: absent + target_type: instance + tags: + Description: "Created by {{ resource_prefix }}" + wait: true + wait_timeout: 200 + <<: *aws_connection_info + register: removed + retries: 10 + until: removed is not failed + with_items: + - "{{ tg_name }}-idle" + - "{{ tg_name }}-ip" + - "{{ tg_name }}-inst" + ignore_errors: true + + - name: remove testing security group + ec2_group: + state: absent + name: "{{ resource_prefix }}-sg" + description: a security group for ansible tests + vpc_id: "{{ vpc.vpc.id }}" + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + <<: *aws_connection_info + register: removed + retries: 10 + until: removed is not failed + ignore_errors: true + + - name: remove routing rules + ec2_vpc_route_table: + state: absent + lookup: id + route_table_id: "{{ route_table.route_table.id }}" + <<: *aws_connection_info + register: removed + retries: 10 + until: removed is not failed + ignore_errors: true + + - name: remove testing subnet + ec2_vpc_subnet: + state: absent + vpc_id: "{{ vpc.vpc.id }}" + cidr: 20.0.0.0/18 + az: "{{ aws_region }}a" + resource_tags: + Name: "{{ resource_prefix }}-subnet" + <<: *aws_connection_info + register: removed + retries: 10 + until: removed is not failed + ignore_errors: true + + - name: remove testing subnet + ec2_vpc_subnet: + state: absent + vpc_id: "{{ vpc.vpc.id }}" + cidr: 20.0.64.0/18 + az: "{{ aws_region }}b" + resource_tags: + Name: "{{ resource_prefix }}-subnet" + <<: *aws_connection_info + register: removed + retries: 10 + until: removed is not failed + ignore_errors: true + + - name: remove testing internet gateway + ec2_vpc_igw: + vpc_id: "{{ vpc.vpc.id }}" + state: absent + <<: *aws_connection_info + register: removed + 
retries: 10 + until: removed is not failed + ignore_errors: true + + - name: remove testing VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: absent + cidr_block: 20.0.0.0/16 + tags: + Name: "{{ resource_prefix }}-vpc" + Description: "Created by ansible-test" + <<: *aws_connection_info + register: removed + retries: 10 + until: removed is not failed + + # ============================================================ diff --git a/test/integration/targets/elb_target_facts/runme.sh b/test/integration/targets/elb_target_facts/runme.sh new file mode 100755 index 00000000000..736aacd06a1 --- /dev/null +++ b/test/integration/targets/elb_target_facts/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +# We don't set -u here, due to pypa/virtualenv#150 +set -ex + +ansible-playbook -i ../../inventory -e @../../integration_config.yml -e @../../cloud-config-aws.yml -v playbooks/full_test.yml "$@"