From 46e5c48f038751ef6df27ccddcfb6845b84f8f0d Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 21 Sep 2017 10:06:14 -0700
Subject: [PATCH] Split ec2_elb_* modules in service of rename/interface changes (#30532)

* Undeprecate ec2_elb_*
* Make ec2_elb* full-fledged modules rather than aliases
* Split tests for ec2_elb_lb and elb_classic_lb
* Change names in documentation of old and new elb modules

Add tests for ec2_elb_lb
---
 lib/ansible/modules/cloud/amazon/_ec2_elb.py  |    1 -
 .../modules/cloud/amazon/_ec2_elb_facts.py    |    1 -
 .../modules/cloud/amazon/_ec2_elb_lb.py       |    1 -
 lib/ansible/modules/cloud/amazon/ec2_elb.py   |  377 +++++
 .../modules/cloud/amazon/ec2_elb_facts.py     |  266 ++++
 .../modules/cloud/amazon/ec2_elb_lb.py        | 1374 +++++++++++++++++
 .../modules/cloud/amazon/elb_classic_lb.py    |   58 +-
 .../cloud/amazon/elb_classic_lb_facts.py      |   19 +-
 .../modules/cloud/amazon/elb_instance.py      |   10 +-
 test/integration/targets/ec2_elb_lb/aliases   |    2 +
 .../targets/ec2_elb_lb/defaults/main.yml      |    3 +
 .../targets/ec2_elb_lb/meta/main.yml          |    3 +
 .../targets/ec2_elb_lb/tasks/main.yml         |  419 +++++
 .../targets/ec2_elb_lb/vars/main.yml          |    2 +
 .../targets/elb_classic_lb/tasks/main.yml     |   30 +-
 test/sanity/pep8/legacy-files.txt             |    6 +-
 16 files changed, 2503 insertions(+), 69 deletions(-)
 delete mode 120000 lib/ansible/modules/cloud/amazon/_ec2_elb.py
 delete mode 120000 lib/ansible/modules/cloud/amazon/_ec2_elb_facts.py
 delete mode 120000 lib/ansible/modules/cloud/amazon/_ec2_elb_lb.py
 create mode 100644 lib/ansible/modules/cloud/amazon/ec2_elb.py
 create mode 100644 lib/ansible/modules/cloud/amazon/ec2_elb_facts.py
 create mode 100644 lib/ansible/modules/cloud/amazon/ec2_elb_lb.py
 create mode 100644 test/integration/targets/ec2_elb_lb/aliases
 create mode 100644 test/integration/targets/ec2_elb_lb/defaults/main.yml
 create mode 100644 test/integration/targets/ec2_elb_lb/meta/main.yml
 create mode 100644 test/integration/targets/ec2_elb_lb/tasks/main.yml
 create mode 100644 test/integration/targets/ec2_elb_lb/vars/main.yml

diff --git a/lib/ansible/modules/cloud/amazon/_ec2_elb.py b/lib/ansible/modules/cloud/amazon/_ec2_elb.py
deleted file mode 120000
index d45648c590c..00000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_elb.py
+++ /dev/null
@@ -1 +0,0 @@
-elb_instance.py
\ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_elb_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_elb_facts.py
deleted file mode 120000
index ffa3692d0de..00000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_elb_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-elb_classic_lb_facts.py
\ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_elb_lb.py b/lib/ansible/modules/cloud/amazon/_ec2_elb_lb.py
deleted file mode 120000
index 908d95746dc..00000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_elb_lb.py
+++ /dev/null
@@ -1 +0,0 @@
-elb_classic_lb.py
\ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/ec2_elb.py b/lib/ansible/modules/cloud/amazon/ec2_elb.py
new file mode 100644
index 00000000000..c1d26b29e2b
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_elb.py
@@ -0,0 +1,377 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'certified'}
+
+
+DOCUMENTATION = """
+---
+module: ec2_elb
+short_description: De-registers or registers instances from EC2 ELBs +description: + - This module de-registers or registers an AWS EC2 instance from the ELBs + that it belongs to. + - Returns fact "ec2_elbs" which is a list of elbs attached to the instance + if state=absent is passed as an argument. + - Will be marked changed when called only if there are ELBs found to operate on. +version_added: "1.2" +author: "John Jarvis (@jarv)" +options: + state: + description: + - register or deregister the instance + required: true + choices: ['present', 'absent'] + instance_id: + description: + - EC2 Instance ID + required: true + ec2_elbs: + description: + - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. + required: false + default: None + enable_availability_zone: + description: + - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already + been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB. + required: false + default: yes + choices: [ "yes", "no" ] + wait: + description: + - Wait for instance registration or deregistration to complete successfully before returning. + required: false + default: yes + choices: [ "yes", "no" ] + validate_certs: + description: + - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. + required: false + default: "yes" + choices: ["yes", "no"] + aliases: [] + version_added: "1.5" + wait_timeout: + description: + - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. + If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no. + required: false + default: 0 + version_added: "1.6" +extends_documentation_fragment: + - aws + - ec2 +""" + +EXAMPLES = """ +# basic pre_task and post_task example +pre_tasks: + - name: Gathering ec2 facts + action: ec2_facts + - name: Instance De-register + local_action: + module: ec2_elb + instance_id: "{{ ansible_ec2_instance_id }}" + state: absent +roles: + - myrole +post_tasks: + - name: Instance Register + local_action: + module: ec2_elb + instance_id: "{{ ansible_ec2_instance_id }}" + ec2_elbs: "{{ item }}" + state: present + with_items: "{{ ec2_elbs }}" +""" + +import time + +try: + import boto + import boto.ec2 + import boto.ec2.autoscale + import boto.ec2.elb + from boto.regioninfo import RegionInfo + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec, + get_aws_connection_info) + + +class ElbManager: + """Handles EC2 instance ELB registration and de-registration""" + + def __init__(self, module, instance_id=None, ec2_elbs=None, + region=None, **aws_connect_params): + self.module = module + self.instance_id = instance_id + self.region = region + self.aws_connect_params = aws_connect_params + self.lbs = self._get_instance_lbs(ec2_elbs) + self.changed = False + + def deregister(self, wait, timeout): + """De-register the instance from all ELBs and wait for the ELB + to report it out-of-service""" + + for lb in self.lbs: + initial_state = self._get_instance_health(lb) + if initial_state is None: + # Instance isn't registered with this load + # balancer. Ignore it and try the next one. 
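The pre_tasks/post_tasks pattern in the EXAMPLES above hinges on the drain step: the instance is pulled out of every attached ELB before the role runs, then re-registered afterwards. For reference, a minimal standalone sketch of that drain step, assuming boto 2.x; drain_instance and its defaults are illustrative names, not part of the module:

import time

import boto.ec2.elb
import boto.exception


def drain_instance(lb_name, instance_id, region='us-east-1', timeout=300):
    # Deregister instance_id from lb_name and poll until the ELB stops
    # reporting it InService -- roughly what ElbManager.deregister()
    # performs for each attached load balancer.
    conn = boto.ec2.elb.connect_to_region(region)
    lb = conn.get_all_load_balancers(load_balancer_names=[lb_name])[0]

    try:
        lb.get_instance_health(instances=[instance_id])
    except boto.exception.BotoServerError as e:
        if e.error_code == 'InvalidInstance':
            return False  # never registered with this ELB; nothing to drain
        raise

    lb.deregister_instances([instance_id])

    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            state = lb.get_instance_health(instances=[instance_id])[0]
        except boto.exception.BotoServerError as e:
            if e.error_code == 'InvalidInstance':
                return True  # fully deregistered
            raise
        if state.state == 'OutOfService':
            return True
        time.sleep(1)
    raise RuntimeError('timed out draining %s from %s' % (instance_id, lb_name))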
+                continue
+
+            lb.deregister_instances([self.instance_id])
+
+            # The ELB is changing state in some way. Either an instance that's
+            # InService is moving to OutOfService, or an instance that's
+            # already OutOfService is being deregistered.
+            self.changed = True
+
+            if wait:
+                self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
+
+    def register(self, wait, enable_availability_zone, timeout):
+        """Register the instance for all ELBs and wait for the ELB
+        to report the instance in-service"""
+        for lb in self.lbs:
+            initial_state = self._get_instance_health(lb)
+
+            if enable_availability_zone:
+                self._enable_availability_zone(lb)
+
+            lb.register_instances([self.instance_id])
+
+            if wait:
+                self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
+            else:
+                # We cannot assume no change was made if we don't wait
+                # to find out
+                self.changed = True
+
+    def exists(self, lbtest):
+        """ Verify that the named ELB actually exists """
+
+        found = False
+        for lb in self.lbs:
+            if lb.name == lbtest:
+                found = True
+                break
+        return found
+
+    def _enable_availability_zone(self, lb):
+        """Enable the current instance's availability zone in the provided lb.
+        Returns True if the zone was enabled or False if no change was made.
+        lb: load balancer"""
+        instance = self._get_instance()
+        if instance.placement in lb.availability_zones:
+            return False
+
+        lb.enable_zones(zones=instance.placement)
+
+        # If successful, the new zone will have been added to
+        # lb.availability_zones
+        return instance.placement in lb.availability_zones
+
+    def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
+        """Wait for an ELB to change state
+        lb: load balancer
+        awaited_state: state to poll for (string)"""
+
+        wait_timeout = time.time() + timeout
+        while True:
+            instance_state = self._get_instance_health(lb)
+
+            if not instance_state:
+                msg = ("The instance %s could not be put in service on %s."
+                       " Reason: Invalid Instance")
+                self.module.fail_json(msg=msg % (self.instance_id, lb))
+
+            if instance_state.state == awaited_state:
+                # Check the current state against the initial state, and only set
+                # changed if they are different.
+                if (initial_state is None) or (instance_state.state != initial_state.state):
+                    self.changed = True
+                break
+            elif self._is_instance_state_pending(instance_state):
+                # If it's pending, we'll skip further checks and continue waiting
+                pass
+            elif (awaited_state == 'InService'
+                    and instance_state.reason_code == "Instance"
+                    and time.time() >= wait_timeout):
+                # If the reason_code for the instance being out of service is
+                # "Instance" this indicates a failure state, e.g. the instance
+                # has failed a health check or the ELB does not have the
+                # instance's availability zone enabled. The exact reason why is
+                # described in InstanceState.description.
+                msg = ("The instance %s could not be put in service on %s."
+                       " Reason: %s")
+                self.module.fail_json(msg=msg % (self.instance_id,
+                                                 lb,
+                                                 instance_state.description))
+            time.sleep(1)
+
+    def _is_instance_state_pending(self, instance_state):
+        """
+        Determines whether the instance_state is "pending", meaning there is
+        an operation under way to bring it in service.
+        """
+        # This is messy, because AWS provides no way to distinguish between
+        # an instance that is OutOfService because it's pending vs. OutOfService
+        # because it's failing health checks. So we're forced to analyze the
+        # description, which is likely to be brittle.
+        return (instance_state and 'pending' in instance_state.description)
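As the comment above notes, AWS reports both a still-pending registration and a failed one as OutOfService, so the module falls back to sniffing the description text. The same heuristic as a standalone classifier (illustrative only; the description strings are not a stable AWS contract):

def classify_instance_state(instance_state):
    # instance_state: a boto InstanceState from lb.get_instance_health(),
    # or None when the instance is not registered with the ELB at all.
    if instance_state is None:
        return 'unregistered'
    if instance_state.state == 'InService':
        return 'in-service'
    if 'pending' in (instance_state.description or ''):
        return 'pending'  # e.g. "Instance registration is still in progress."
    if instance_state.reason_code == 'Instance':
        return 'failed'   # failing health checks, or AZ not enabled on the ELB
    return 'unknown'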
+
+    def _get_instance_health(self, lb):
+        """
+        Check instance health, should return status object or None under
+        certain error conditions.
+        """
+        try:
+            status = lb.get_instance_health([self.instance_id])[0]
+        except boto.exception.BotoServerError as e:
+            if e.error_code == 'InvalidInstance':
+                return None
+            else:
+                raise
+        return status
+
+    def _get_instance_lbs(self, ec2_elbs=None):
+        """Returns a list of ELBs attached to self.instance_id
+        ec2_elbs: an optional list of elb names that will be used
+                  for elb lookup instead of returning what elbs
+                  are attached to self.instance_id"""
+
+        if not ec2_elbs:
+            ec2_elbs = self._get_auto_scaling_group_lbs()
+
+        try:
+            elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+            self.module.fail_json(msg=str(e))
+
+        elbs = []
+        marker = None
+        while True:
+            try:
+                newelbs = elb.get_all_load_balancers(marker=marker)
+                marker = newelbs.next_marker
+                elbs.extend(newelbs)
+                if not marker:
+                    break
+            except TypeError:
+                # Older versions of boto do not allow for params
+                elbs = elb.get_all_load_balancers()
+                break
+
+        if ec2_elbs:
+            lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
+        else:
+            lbs = []
+            for lb in elbs:
+                for info in lb.instances:
+                    if self.instance_id == info.id:
+                        lbs.append(lb)
+        return lbs
+
+    def _get_auto_scaling_group_lbs(self):
+        """Returns a list of ELBs associated with self.instance_id
+           indirectly through its auto scaling group membership"""
+
+        try:
+            asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+            self.module.fail_json(msg=str(e))
+
+        asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
+        if len(asg_instances) > 1:
+            self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")
+
+        if not asg_instances:
+            asg_elbs = []
+        else:
+            asg_name = asg_instances[0].group_name
+
+            asgs = asg.get_all_groups([asg_name])
+            if len(asgs) != 1:
+                self.module.fail_json(msg="Illegal state, expected one auto scaling group.")
+
+            asg_elbs = asgs[0].load_balancers
+
+        return asg_elbs
+
+    def _get_instance(self):
+        """Returns a boto.ec2.InstanceObject for self.instance_id"""
+        try:
+            ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+            self.module.fail_json(msg=str(e))
+        return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state={'required': True},
+        instance_id={'required': True},
+        ec2_elbs={'default': None, 'required': False, 'type': 'list'},
+        enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
+        wait={'required': False, 'default': True, 'type': 'bool'},
+        wait_timeout={'required': False, 'default': 0, 'type': 'int'}
+    )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+    )
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto required for this module')
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+    if not region:
+        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+
+    ec2_elbs = module.params['ec2_elbs']
+    wait = module.params['wait']
+    enable_availability_zone = module.params['enable_availability_zone']
+    timeout = module.params['wait_timeout']
+
+    if module.params['state'] == 'present' and module.params['ec2_elbs'] is None:
+        module.fail_json(msg="ELBs are required for registration")
+
+    instance_id = module.params['instance_id']
+    elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params)
+
+    if ec2_elbs is not None:
+        for elb in ec2_elbs:
+            if not elb_man.exists(elb):
+                msg = "ELB %s does not exist" % elb
+                module.fail_json(msg=msg)
+
+    if module.params['state'] == 'present':
+        elb_man.register(wait, enable_availability_zone, timeout)
+    elif module.params['state'] == 'absent':
+        elb_man.deregister(wait, timeout)
+
+    ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
+    ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
+
+    module.exit_json(**ec2_facts_result)
+
+
+if __name__ == '__main__':
+    main()
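_get_instance_lbs() above pages through get_all_load_balancers() by following next_marker, and falls back to a single unpaginated call on boto releases old enough to raise TypeError for the marker argument. The pagination loop in isolation, assuming a boto 2.x ELBConnection:

import boto.ec2.elb


def iter_load_balancers(conn):
    # conn: an ELBConnection, e.g. boto.ec2.elb.connect_to_region('us-east-1')
    marker = None
    while True:
        page = conn.get_all_load_balancers(marker=marker)
        for lb in page:
            yield lb
        marker = page.next_marker
        if not marker:
            break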
diff --git a/lib/ansible/modules/cloud/amazon/ec2_elb_facts.py b/lib/ansible/modules/cloud/amazon/ec2_elb_facts.py
new file mode 100644
index 00000000000..ead2a9bb418
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_elb_facts.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+#
+# This is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_elb_facts
+short_description: Gather facts about EC2 Elastic Load Balancers in AWS
+description:
+    - Gather facts about EC2 Elastic Load Balancers in AWS
+version_added: "2.0"
+author:
+  - "Michael Schultz (github.com/mjschultz)"
+  - "Fernando Jose Pando (@nand0p)"
+options:
+  names:
+    description:
+      - List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.
+    required: false
+    default: null
+    aliases: ['elb_ids', 'ec2_elbs']
+extends_documentation_fragment:
+    - aws
+    - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Output format tries to match ec2_elb_lb module input parameters + +# Gather facts about all ELBs +- action: + module: ec2_elb_facts + register: elb_facts + +- action: + module: debug + msg: "{{ item.dns_name }}" + with_items: "{{ elb_facts.elbs }}" + +# Gather facts about a particular ELB +- action: + module: ec2_elb_facts + names: frontend-prod-elb + register: elb_facts + +- action: + module: debug + msg: "{{ elb_facts.elbs.0.dns_name }}" + +# Gather facts about a set of ELBs +- action: + module: ec2_elb_facts + names: + - frontend-prod-elb + - backend-prod-elb + register: elb_facts + +- action: + module: debug + msg: "{{ item.dns_name }}" + with_items: "{{ elb_facts.elbs }}" + +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import ( + AWSRetry, + connect_to_aws, + ec2_argument_spec, + get_aws_connection_info, +) + +try: + import boto.ec2.elb + from boto.ec2.tag import Tag + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +class ElbInformation(object): + """Handles ELB information.""" + + def __init__(self, + module, + names, + region, + **aws_connect_params): + + self.module = module + self.names = names + self.region = region + self.aws_connect_params = aws_connect_params + self.connection = self._get_elb_connection() + + def _get_tags(self, elbname): + params = {'LoadBalancerNames.member.1': elbname} + elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)]) + return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key')) + + @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + def _get_elb_connection(self): + return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) + + def _get_elb_listeners(self, listeners): + listener_list = [] + + for listener in listeners: + listener_dict = { + 'load_balancer_port': listener[0], + 'instance_port': listener[1], + 'protocol': listener[2], + } + + try: + ssl_certificate_id = listener[4] + except IndexError: + pass + else: + if ssl_certificate_id: + listener_dict['ssl_certificate_id'] = ssl_certificate_id + + listener_list.append(listener_dict) + + return listener_list + + def _get_health_check(self, health_check): + protocol, port_path = health_check.target.split(':') + try: + port, path = port_path.split('/', 1) + path = '/{0}'.format(path) + except ValueError: + port = port_path + path = None + + health_check_dict = { + 'ping_protocol': protocol.lower(), + 'ping_port': int(port), + 'response_timeout': health_check.timeout, + 'interval': health_check.interval, + 'unhealthy_threshold': health_check.unhealthy_threshold, + 'healthy_threshold': health_check.healthy_threshold, + } + + if path: + health_check_dict['ping_path'] = path + return health_check_dict + + @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + def _get_elb_info(self, elb): + elb_info = { + 'name': elb.name, + 'zones': elb.availability_zones, + 'dns_name': elb.dns_name, + 'canonical_hosted_zone_name': elb.canonical_hosted_zone_name, + 'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id, + 'hosted_zone_name': elb.canonical_hosted_zone_name, + 'hosted_zone_id': elb.canonical_hosted_zone_name_id, + 'instances': [instance.id for instance in elb.instances], + 'listeners': self._get_elb_listeners(elb.listeners), + 'scheme': elb.scheme, + 'security_groups': elb.security_groups, + 'health_check': self._get_health_check(elb.health_check), + 'subnets': elb.subnets, + 'instances_inservice': [], + 
'instances_inservice_count': 0, + 'instances_outofservice': [], + 'instances_outofservice_count': 0, + 'instances_inservice_percent': 0.0, + 'tags': self._get_tags(elb.name) + } + + if elb.vpc_id: + elb_info['vpc_id'] = elb.vpc_id + + if elb.instances: + instance_health = self.connection.describe_instance_health(elb.name) + elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService'] + elb_info['instances_inservice_count'] = len(elb_info['instances_inservice']) + elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService'] + elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice']) + try: + elb_info['instances_inservice_percent'] = ( + float(elb_info['instances_inservice_count']) / + float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count']) + ) * 100. + except ZeroDivisionError: + elb_info['instances_inservice_percent'] = 0. + return elb_info + + def list_elbs(self): + elb_array, token = [], None + get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers) + while True: + all_elbs = get_elb_with_backoff(marker=token) + token = all_elbs.next_marker + + if all_elbs: + if self.names: + for existing_lb in all_elbs: + if existing_lb.name in self.names: + elb_array.append(existing_lb) + else: + elb_array.extend(all_elbs) + else: + break + + if token is None: + break + + return list(map(self._get_elb_info, elb_array)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + names={'default': [], 'type': 'list'} + ) + ) + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + try: + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg="region must be specified") + + names = module.params['names'] + elb_information = ElbInformation( + module, names, region, **aws_connect_params) + + ec2_facts_result = dict(changed=False, + elbs=elb_information.list_elbs()) + + except BotoServerError as err: + module.fail_json(msg="{0}: {1}".format(err.error_code, err.error_message), + exception=traceback.format_exc()) + + module.exit_json(**ec2_facts_result) + + +if __name__ == '__main__': + main() diff --git a/lib/ansible/modules/cloud/amazon/ec2_elb_lb.py b/lib/ansible/modules/cloud/amazon/ec2_elb_lb.py new file mode 100644 index 00000000000..c983a602f50 --- /dev/null +++ b/lib/ansible/modules/cloud/amazon/ec2_elb_lb.py @@ -0,0 +1,1374 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'certified'} + + +DOCUMENTATION = """ +--- +module: ec2_elb_lb +description: + - Returns information about the load balancer. + - Will be marked changed when called only if state is changed. +short_description: Creates or destroys Amazon ELB. 
+version_added: "1.5" +author: + - "Jim Dalton (@jsdalton)" +options: + state: + description: + - Create or destroy the ELB + choices: ["present", "absent"] + required: true + name: + description: + - The name of the ELB + required: true + listeners: + description: + - List of ports/protocols for this ELB to listen on (see example) + required: false + purge_listeners: + description: + - Purge existing listeners on ELB that are not found in listeners + required: false + default: true + instance_ids: + description: + - List of instance ids to attach to this ELB + required: false + default: false + version_added: "2.1" + purge_instance_ids: + description: + - Purge existing instance ids on ELB that are not found in instance_ids + required: false + default: false + version_added: "2.1" + zones: + description: + - List of availability zones to enable on this ELB + required: false + purge_zones: + description: + - Purge existing availability zones on ELB that are not found in zones + required: false + default: false + security_group_ids: + description: + - A list of security groups to apply to the elb + required: false + default: None + version_added: "1.6" + security_group_names: + description: + - A list of security group names to apply to the elb + required: false + default: None + version_added: "2.0" + health_check: + description: + - An associative array of health check configuration settings (see example) + required: false + default: None + access_logs: + description: + - An associative array of access logs configuration settings (see example) + required: false + default: None + version_added: "2.0" + subnets: + description: + - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. + required: false + default: None + aliases: [] + version_added: "1.7" + purge_subnets: + description: + - Purge existing subnet on ELB that are not found in subnets + required: false + default: false + version_added: "1.7" + scheme: + description: + - The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'. + If you choose to update your scheme with a different value the ELB will be destroyed and + recreated. To update scheme you must use the option wait. + choices: ["internal", "internet-facing"] + required: false + default: 'internet-facing' + version_added: "1.7" + validate_certs: + description: + - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. + required: false + default: "yes" + choices: ["yes", "no"] + aliases: [] + version_added: "1.5" + connection_draining_timeout: + description: + - Wait a specified timeout allowing connections to drain before terminating an instance + required: false + aliases: [] + version_added: "1.8" + idle_timeout: + description: + - ELB connections from clients and to servers are timed out after this amount of time + required: false + version_added: "2.0" + cross_az_load_balancing: + description: + - Distribute load across all configured Availability Zones + required: false + default: "no" + choices: ["yes", "no"] + aliases: [] + version_added: "1.8" + stickiness: + description: + - An associative array of stickiness policy settings. Policy will be applied to all listeners ( see example ) + required: false + version_added: "2.0" + wait: + description: + - When specified, Ansible will check the status of the load balancer to ensure it has been successfully + removed from AWS. 
+ required: false + default: no + choices: ["yes", "no"] + version_added: "2.1" + wait_timeout: + description: + - Used in conjunction with wait. Number of seconds to wait for the elb to be terminated. + A maximum of 600 seconds (10 minutes) is allowed. + required: false + default: 60 + version_added: "2.1" + tags: + description: + - An associative array of tags. To delete all tags, supply an empty dict. + required: false + version_added: "2.1" + +extends_documentation_fragment: + - aws + - ec2 +""" + +EXAMPLES = """ +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. + +# Basic provisioning example (non-VPC) + +- local_action: + module: ec2_elb_lb + name: "test-please-delete" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http # options are http, https, ssl, tcp + load_balancer_port: 80 + instance_port: 80 + proxy_protocol: True + - protocol: https + load_balancer_port: 443 + instance_protocol: http # optional, defaults to value of protocol setting + instance_port: 80 + # ssl certificate required for https or ssl + ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" + +# Internal ELB example + +- local_action: + module: ec2_elb_lb + name: "test-vpc" + scheme: internal + state: present + instance_ids: + - i-abcd1234 + purge_instance_ids: true + subnets: + - subnet-abcd1234 + - subnet-1a2b3c4d + listeners: + - protocol: http # options are http, https, ssl, tcp + load_balancer_port: 80 + instance_port: 80 + +# Configure a health check and the access logs +- local_action: + module: ec2_elb_lb + name: "test-please-delete" + state: present + zones: + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + health_check: + ping_protocol: http # options are http, https, ssl, tcp + ping_port: 80 + ping_path: "/index.html" # not required for tcp or ssl + response_timeout: 5 # seconds + interval: 30 # seconds + unhealthy_threshold: 2 + healthy_threshold: 10 + access_logs: + interval: 5 # minutes (defaults to 60) + s3_location: "my-bucket" # This value is required if access_logs is set + s3_prefix: "logs" + +# Ensure ELB is gone +- local_action: + module: ec2_elb_lb + name: "test-please-delete" + state: absent + +# Ensure ELB is gone and wait for check (for default timeout) +- local_action: + module: ec2_elb_lb + name: "test-please-delete" + state: absent + wait: yes + +# Ensure ELB is gone and wait for check with timeout value +- local_action: + module: ec2_elb_lb + name: "test-please-delete" + state: absent + wait: yes + wait_timeout: 600 + +# Normally, this module will purge any listeners that exist on the ELB +# but aren't specified in the listeners parameter. If purge_listeners is +# false it leaves them alone +- local_action: + module: ec2_elb_lb + name: "test-please-delete" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + purge_listeners: no + +# Normally, this module will leave availability zones that are enabled +# on the ELB alone. If purge_zones is true, then any extraneous zones +# will be removed +- local_action: + module: ec2_elb_lb + name: "test-please-delete" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + purge_zones: yes + +# Creates a ELB and assigns a list of subnets to it. 
+- local_action: + module: ec2_elb_lb + state: present + name: 'New ELB' + security_group_ids: 'sg-123456, sg-67890' + region: us-west-2 + subnets: 'subnet-123456,subnet-67890' + purge_subnets: yes + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + +# Create an ELB with connection draining, increased idle timeout and cross availability +# zone load balancing +- local_action: + module: ec2_elb_lb + name: "New ELB" + state: present + connection_draining_timeout: 60 + idle_timeout: 300 + cross_az_load_balancing: "yes" + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + +# Create an ELB with load balancer stickiness enabled +- local_action: + module: ec2_elb_lb + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + stickiness: + type: loadbalancer + enabled: yes + expiration: 300 + +# Create an ELB with application stickiness enabled +- local_action: + module: ec2_elb_lb + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + stickiness: + type: application + enabled: yes + cookie: SESSIONID + +# Create an ELB and add tags +- local_action: + module: ec2_elb_lb + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + tags: + Name: "New ELB" + stack: "production" + client: "Bob" + +# Delete all tags from an ELB +- local_action: + module: ec2_elb_lb + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + tags: {} +""" + +import random +import time +import traceback + +try: + import boto + import boto.ec2.elb + import boto.ec2.elb.attributes + import boto.vpc + from boto.ec2.elb.healthcheck import HealthCheck + from boto.ec2.tag import Tag + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import ec2_argument_spec, connect_to_aws, AnsibleAWSError, get_aws_connection_info +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_native + + +def _throttleable_operation(max_retries): + def _operation_wrapper(op): + def _do_op(*args, **kwargs): + retry = 0 + while True: + try: + return op(*args, **kwargs) + except boto.exception.BotoServerError as e: + if retry < max_retries and e.code in \ + ("Throttling", "RequestLimitExceeded"): + retry = retry + 1 + time.sleep(min(random.random() * (2 ** retry), 300)) + continue + else: + raise + return _do_op + return _operation_wrapper + +def _get_vpc_connection(module, region, aws_connect_params): + try: + return connect_to_aws(boto.vpc, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json(msg=str(e)) + + +_THROTTLING_RETRIES = 5 + +class ElbManager(object): + """Handles ELB creation and destruction""" + + def __init__(self, module, name, listeners=None, purge_listeners=None, + zones=None, purge_zones=None, security_group_ids=None, + health_check=None, subnets=None, purge_subnets=None, + scheme="internet-facing", connection_draining_timeout=None, + idle_timeout=None, + cross_az_load_balancing=None, 
access_logs=None, + stickiness=None, wait=None, wait_timeout=None, tags=None, + region=None, + instance_ids=None, purge_instance_ids=None, **aws_connect_params): + + self.module = module + self.name = name + self.listeners = listeners + self.purge_listeners = purge_listeners + self.instance_ids = instance_ids + self.purge_instance_ids = purge_instance_ids + self.zones = zones + self.purge_zones = purge_zones + self.security_group_ids = security_group_ids + self.health_check = health_check + self.subnets = subnets + self.purge_subnets = purge_subnets + self.scheme = scheme + self.connection_draining_timeout = connection_draining_timeout + self.idle_timeout = idle_timeout + self.cross_az_load_balancing = cross_az_load_balancing + self.access_logs = access_logs + self.stickiness = stickiness + self.wait = wait + self.wait_timeout = wait_timeout + self.tags = tags + + self.aws_connect_params = aws_connect_params + self.region = region + + self.changed = False + self.status = 'gone' + self.elb_conn = self._get_elb_connection() + + try: + self.elb = self._get_elb() + except boto.exception.BotoServerError as e: + module.fail_json(msg='unable to get all load balancers: %s' % e.message, exception=traceback.format_exc()) + + self.ec2_conn = self._get_ec2_connection() + + @_throttleable_operation(_THROTTLING_RETRIES) + def ensure_ok(self): + """Create the ELB""" + if not self.elb: + # Zones and listeners will be added at creation + self._create_elb() + else: + if self._get_scheme(): + # the only way to change the scheme is by recreating the resource + self.ensure_gone() + self._create_elb() + else: + self._set_zones() + self._set_security_groups() + self._set_elb_listeners() + self._set_subnets() + self._set_health_check() + # boto has introduced support for some ELB attributes in + # different versions, so we check first before trying to + # set them to avoid errors + if self._check_attribute_support('connection_draining'): + self._set_connection_draining_timeout() + if self._check_attribute_support('connecting_settings'): + self._set_idle_timeout() + if self._check_attribute_support('cross_zone_load_balancing'): + self._set_cross_az_load_balancing() + if self._check_attribute_support('access_log'): + self._set_access_log() + # add sitcky options + self.select_stickiness_policy() + + # ensure backend server policies are correct + self._set_backend_policies() + # set/remove instance ids + self._set_instance_ids() + + self._set_tags() + + def ensure_gone(self): + """Destroy the ELB""" + if self.elb: + self._delete_elb() + if self.wait: + elb_removed = self._wait_for_elb_removed() + # Unfortunately even though the ELB itself is removed quickly + # the interfaces take longer so reliant security groups cannot + # be deleted until the interface has registered as removed. 
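ensure_ok() and ensure_gone() above are wrapped by the _throttleable_operation decorator, which retries calls that AWS rejects with Throttling or RequestLimitExceeded, sleeping a random interval that grows exponentially with each retry and is capped at 300 seconds. The same backoff policy in isolation (ThrottledError is a stand-in for the BotoServerError code check):

import random
import time


class ThrottledError(Exception):
    # stand-in for boto.exception.BotoServerError with e.code in
    # ("Throttling", "RequestLimitExceeded")
    pass


def retry_throttled(op, max_retries=5, cap=300):
    # Full-jitter exponential backoff: sleep a random amount bounded by
    # 2 ** retry seconds, never more than `cap` seconds per attempt.
    retry = 0
    while True:
        try:
            return op()
        except ThrottledError:
            if retry >= max_retries:
                raise
            retry += 1
            time.sleep(min(random.random() * (2 ** retry), cap))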
+ elb_interface_removed = self._wait_for_elb_interface_removed() + if not (elb_removed and elb_interface_removed): + self.module.fail_json(msg='Timed out waiting for removal of load balancer.') + + def get_info(self): + try: + check_elb = self.elb_conn.get_all_load_balancers(self.name)[0] + except: + check_elb = None + + if not check_elb: + info = { + 'name': self.name, + 'status': self.status, + 'region': self.region + } + else: + try: + lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name'] + except: + lb_cookie_policy = None + try: + app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name'] + except: + app_cookie_policy = None + + info = { + 'name': check_elb.name, + 'dns_name': check_elb.dns_name, + 'zones': check_elb.availability_zones, + 'security_group_ids': check_elb.security_groups, + 'status': self.status, + 'subnets': self.subnets, + 'scheme': check_elb.scheme, + 'hosted_zone_name': check_elb.canonical_hosted_zone_name, + 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id, + 'lb_cookie_policy': lb_cookie_policy, + 'app_cookie_policy': app_cookie_policy, + 'proxy_policy': self._get_proxy_protocol_policy(), + 'backends': self._get_backend_policies(), + 'instances': [instance.id for instance in check_elb.instances], + 'out_of_service_count': 0, + 'in_service_count': 0, + 'unknown_instance_state_count': 0, + 'region': self.region + } + + # status of instances behind the ELB + if info['instances']: + info['instance_health'] = [ dict( + instance_id = instance_state.instance_id, + reason_code = instance_state.reason_code, + state = instance_state.state + ) for instance_state in self.elb_conn.describe_instance_health(self.name)] + else: + info['instance_health'] = [] + + # instance state counts: InService or OutOfService + if info['instance_health']: + for instance_state in info['instance_health']: + if instance_state['state'] == "InService": + info['in_service_count'] += 1 + elif instance_state['state'] == "OutOfService": + info['out_of_service_count'] += 1 + else: + info['unknown_instance_state_count'] += 1 + + if check_elb.health_check: + info['health_check'] = { + 'target': check_elb.health_check.target, + 'interval': check_elb.health_check.interval, + 'timeout': check_elb.health_check.timeout, + 'healthy_threshold': check_elb.health_check.healthy_threshold, + 'unhealthy_threshold': check_elb.health_check.unhealthy_threshold, + } + + if check_elb.listeners: + info['listeners'] = [self._api_listener_as_tuple(l) + for l in check_elb.listeners] + elif self.status == 'created': + # When creating a new ELB, listeners don't show in the + # immediately returned result, so just include the + # ones that were added + info['listeners'] = [self._listener_as_tuple(l) + for l in self.listeners] + else: + info['listeners'] = [] + + if self._check_attribute_support('connection_draining'): + info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout) + + if self._check_attribute_support('connecting_settings'): + info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout + + if self._check_attribute_support('cross_zone_load_balancing'): + is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing') + if is_cross_az_lb_enabled: + info['cross_az_load_balancing'] = 'yes' + else: + info['cross_az_load_balancing'] = 'no' + + # return stickiness info? 
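The per-state counting that get_info() performs over instance_health can be stated more compactly with collections.Counter; an equivalent sketch, assuming the same list-of-dicts shape get_info() builds:

from collections import Counter


def tally_instance_states(instance_health):
    # instance_health: list of dicts with a 'state' key, where state is
    # 'InService', 'OutOfService', or anything else (counted as unknown).
    counts = Counter(entry['state'] for entry in instance_health)
    in_service = counts.pop('InService', 0)
    out_of_service = counts.pop('OutOfService', 0)
    return {
        'in_service_count': in_service,
        'out_of_service_count': out_of_service,
        'unknown_instance_state_count': sum(counts.values()),
    }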
+ + info['tags'] = self.tags + + return info + + @_throttleable_operation(_THROTTLING_RETRIES) + def _wait_for_elb_removed(self): + polling_increment_secs = 15 + max_retries = (self.wait_timeout // polling_increment_secs) + status_achieved = False + + for x in range(0, max_retries): + try: + self.elb_conn.get_all_lb_attributes(self.name) + except (boto.exception.BotoServerError, Exception) as e: + if "LoadBalancerNotFound" in e.code: + status_achieved = True + break + else: + time.sleep(polling_increment_secs) + + return status_achieved + + @_throttleable_operation(_THROTTLING_RETRIES) + def _wait_for_elb_interface_removed(self): + polling_increment_secs = 15 + max_retries = (self.wait_timeout // polling_increment_secs) + status_achieved = False + + elb_interfaces = self.ec2_conn.get_all_network_interfaces( + filters={'attachment.instance-owner-id': 'amazon-elb', + 'description': 'ELB {0}'.format(self.name) }) + + for x in range(0, max_retries): + for interface in elb_interfaces: + try: + result = self.ec2_conn.get_all_network_interfaces(interface.id) + if result == []: + status_achieved = True + break + else: + time.sleep(polling_increment_secs) + except (boto.exception.BotoServerError, Exception) as e: + if 'InvalidNetworkInterfaceID' in e.code: + status_achieved = True + break + else: + self.module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + return status_achieved + + @_throttleable_operation(_THROTTLING_RETRIES) + def _get_elb(self): + elbs = self.elb_conn.get_all_load_balancers() + for elb in elbs: + if self.name == elb.name: + self.status = 'ok' + return elb + + def _get_elb_connection(self): + try: + return connect_to_aws(boto.ec2.elb, self.region, + **self.aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + self.module.fail_json(msg=str(e)) + + def _get_ec2_connection(self): + try: + return connect_to_aws(boto.ec2, self.region, + **self.aws_connect_params) + except (boto.exception.NoAuthHandlerFound, Exception) as e: + self.module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + @_throttleable_operation(_THROTTLING_RETRIES) + def _delete_elb(self): + # True if succeeds, exception raised if not + result = self.elb_conn.delete_load_balancer(name=self.name) + if result: + self.changed = True + self.status = 'deleted' + + def _create_elb(self): + listeners = [self._listener_as_tuple(l) for l in self.listeners] + self.elb = self.elb_conn.create_load_balancer(name=self.name, + zones=self.zones, + security_groups=self.security_group_ids, + complex_listeners=listeners, + subnets=self.subnets, + scheme=self.scheme) + if self.elb: + # HACK: Work around a boto bug in which the listeners attribute is + # always set to the listeners argument to create_load_balancer, and + # not the complex_listeners + # We're not doing a self.elb = self._get_elb here because there + # might be eventual consistency issues and it doesn't necessarily + # make sense to wait until the ELB gets returned from the EC2 API. 
+ # This is necessary in the event we hit the throttling errors and + # need to retry ensure_ok + # See https://github.com/boto/boto/issues/3526 + self.elb.listeners = self.listeners + self.changed = True + self.status = 'created' + + def _create_elb_listeners(self, listeners): + """Takes a list of listener tuples and creates them""" + # True if succeeds, exception raised if not + self.changed = self.elb_conn.create_load_balancer_listeners(self.name, + complex_listeners=listeners) + + def _delete_elb_listeners(self, listeners): + """Takes a list of listener tuples and deletes them from the elb""" + ports = [l[0] for l in listeners] + + # True if succeeds, exception raised if not + self.changed = self.elb_conn.delete_load_balancer_listeners(self.name, + ports) + + def _set_elb_listeners(self): + """ + Creates listeners specified by self.listeners; overwrites existing + listeners on these ports; removes extraneous listeners + """ + listeners_to_add = [] + listeners_to_remove = [] + listeners_to_keep = [] + + # Check for any listeners we need to create or overwrite + for listener in self.listeners: + listener_as_tuple = self._listener_as_tuple(listener) + + # First we loop through existing listeners to see if one is + # already specified for this port + existing_listener_found = None + for existing_listener in self.elb.listeners: + # Since ELB allows only one listener on each incoming port, a + # single match on the incoming port is all we're looking for + if existing_listener[0] == int(listener['load_balancer_port']): + existing_listener_found = self._api_listener_as_tuple(existing_listener) + break + + if existing_listener_found: + # Does it match exactly? + if listener_as_tuple != existing_listener_found: + # The ports are the same but something else is different, + # so we'll remove the existing one and add the new one + listeners_to_remove.append(existing_listener_found) + listeners_to_add.append(listener_as_tuple) + else: + # We already have this listener, so we're going to keep it + listeners_to_keep.append(existing_listener_found) + else: + # We didn't find an existing listener, so just add the new one + listeners_to_add.append(listener_as_tuple) + + # Check for any extraneous listeners we need to remove, if desired + if self.purge_listeners: + for existing_listener in self.elb.listeners: + existing_listener_tuple = self._api_listener_as_tuple(existing_listener) + if existing_listener_tuple in listeners_to_remove: + # Already queued for removal + continue + if existing_listener_tuple in listeners_to_keep: + # Keep this one around + continue + # Since we're not already removing it and we don't need to keep + # it, let's get rid of it + listeners_to_remove.append(existing_listener_tuple) + + if listeners_to_remove: + self._delete_elb_listeners(listeners_to_remove) + + if listeners_to_add: + self._create_elb_listeners(listeners_to_add) + + def _api_listener_as_tuple(self, listener): + """Adds ssl_certificate_id to ELB API tuple if present""" + base_tuple = listener.get_complex_tuple() + if listener.ssl_certificate_id and len(base_tuple) < 5: + return base_tuple + (listener.ssl_certificate_id,) + return base_tuple + + def _listener_as_tuple(self, listener): + """Formats listener as a 4- or 5-tuples, in the order specified by the + ELB API""" + # N.B. 
string manipulations on protocols below (str(), upper()) is to + # ensure format matches output from ELB API + listener_list = [ + int(listener['load_balancer_port']), + int(listener['instance_port']), + str(listener['protocol'].upper()), + ] + + # Instance protocol is not required by ELB API; it defaults to match + # load balancer protocol. We'll mimic that behavior here + if 'instance_protocol' in listener: + listener_list.append(str(listener['instance_protocol'].upper())) + else: + listener_list.append(str(listener['protocol'].upper())) + + if 'ssl_certificate_id' in listener: + listener_list.append(str(listener['ssl_certificate_id'])) + + return tuple(listener_list) + + def _enable_zones(self, zones): + try: + self.elb.enable_zones(zones) + except boto.exception.BotoServerError as e: + self.module.fail_json(msg='unable to enable zones: %s' % e.message, exception=traceback.format_exc()) + + self.changed = True + + def _disable_zones(self, zones): + try: + self.elb.disable_zones(zones) + except boto.exception.BotoServerError as e: + self.module.fail_json(msg='unable to disable zones: %s' % e.message, exception=traceback.format_exc()) + self.changed = True + + def _attach_subnets(self, subnets): + self.elb_conn.attach_lb_to_subnets(self.name, subnets) + self.changed = True + + def _detach_subnets(self, subnets): + self.elb_conn.detach_lb_from_subnets(self.name, subnets) + self.changed = True + + def _set_subnets(self): + """Determine which subnets need to be attached or detached on the ELB""" + if self.subnets: + if self.purge_subnets: + subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets)) + subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets)) + else: + subnets_to_detach = None + subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets)) + + if subnets_to_attach: + self._attach_subnets(subnets_to_attach) + if subnets_to_detach: + self._detach_subnets(subnets_to_detach) + + def _get_scheme(self): + """Determine if the current scheme is different than the scheme of the ELB""" + if self.scheme: + if self.elb.scheme != self.scheme: + if not self.wait: + self.module.fail_json(msg="Unable to modify scheme without using the wait option") + return True + return False + + def _set_zones(self): + """Determine which zones need to be enabled or disabled on the ELB""" + if self.zones: + if self.purge_zones: + zones_to_disable = list(set(self.elb.availability_zones) - + set(self.zones)) + zones_to_enable = list(set(self.zones) - + set(self.elb.availability_zones)) + else: + zones_to_disable = None + zones_to_enable = list(set(self.zones) - + set(self.elb.availability_zones)) + if zones_to_enable: + self._enable_zones(zones_to_enable) + # N.B. This must come second, in case it would have removed all zones + if zones_to_disable: + self._disable_zones(zones_to_disable) + + def _set_security_groups(self): + if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids): + self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids) + self.changed = True + + def _set_health_check(self): + """Set health check values on ELB as needed""" + if self.health_check: + # This just makes it easier to compare each of the attributes + # and look for changes. 
Keys are attributes of the current + # health_check; values are desired values of new health_check + health_check_config = { + "target": self._get_health_check_target(), + "timeout": self.health_check['response_timeout'], + "interval": self.health_check['interval'], + "unhealthy_threshold": self.health_check['unhealthy_threshold'], + "healthy_threshold": self.health_check['healthy_threshold'], + } + + update_health_check = False + + # The health_check attribute is *not* set on newly created + # ELBs! So we have to create our own. + if not self.elb.health_check: + self.elb.health_check = HealthCheck() + + for attr, desired_value in health_check_config.items(): + if getattr(self.elb.health_check, attr) != desired_value: + setattr(self.elb.health_check, attr, desired_value) + update_health_check = True + + if update_health_check: + self.elb.configure_health_check(self.elb.health_check) + self.changed = True + + def _check_attribute_support(self, attr): + return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr) + + def _set_cross_az_load_balancing(self): + attributes = self.elb.get_attributes() + if self.cross_az_load_balancing: + if not attributes.cross_zone_load_balancing.enabled: + self.changed = True + attributes.cross_zone_load_balancing.enabled = True + else: + if attributes.cross_zone_load_balancing.enabled: + self.changed = True + attributes.cross_zone_load_balancing.enabled = False + self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing', + attributes.cross_zone_load_balancing.enabled) + + def _set_access_log(self): + attributes = self.elb.get_attributes() + if self.access_logs: + if 's3_location' not in self.access_logs: + self.module.fail_json(msg='s3_location information required') + + access_logs_config = { + "enabled": True, + "s3_bucket_name": self.access_logs['s3_location'], + "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''), + "emit_interval": self.access_logs.get('interval', 60), + } + + update_access_logs_config = False + for attr, desired_value in access_logs_config.items(): + if getattr(attributes.access_log, attr) != desired_value: + setattr(attributes.access_log, attr, desired_value) + update_access_logs_config = True + if update_access_logs_config: + self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log) + self.changed = True + elif attributes.access_log.enabled: + attributes.access_log.enabled = False + self.changed = True + self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log) + + def _set_connection_draining_timeout(self): + attributes = self.elb.get_attributes() + if self.connection_draining_timeout is not None: + if not attributes.connection_draining.enabled or \ + attributes.connection_draining.timeout != self.connection_draining_timeout: + self.changed = True + attributes.connection_draining.enabled = True + attributes.connection_draining.timeout = self.connection_draining_timeout + self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) + else: + if attributes.connection_draining.enabled: + self.changed = True + attributes.connection_draining.enabled = False + self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) + + def _set_idle_timeout(self): + attributes = self.elb.get_attributes() + if self.idle_timeout is not None: + if attributes.connecting_settings.idle_timeout != self.idle_timeout: + self.changed = True + attributes.connecting_settings.idle_timeout = self.idle_timeout + 
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings) + + def _policy_name(self, policy_type): + return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type + + def _create_policy(self, policy_param, policy_meth, policy): + getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy) + + def _delete_policy(self, elb_name, policy): + self.elb_conn.delete_lb_policy(elb_name, policy) + + def _update_policy(self, policy_param, policy_meth, policy_attr, policy): + self._delete_policy(self.elb.name, policy) + self._create_policy(policy_param, policy_meth, policy) + + def _set_listener_policy(self, listeners_dict, policy=None): + policy = [] if policy is None else policy + + for listener_port in listeners_dict: + if listeners_dict[listener_port].startswith('HTTP'): + self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy) + + def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs): + for p in getattr(elb_info.policies, policy_attrs['attr']): + if str(p.__dict__['policy_name']) == str(policy[0]): + if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0): + self._set_listener_policy(listeners_dict) + self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0]) + self.changed = True + break + else: + self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0]) + self.changed = True + + self._set_listener_policy(listeners_dict, policy) + + def select_stickiness_policy(self): + if self.stickiness: + + if 'cookie' in self.stickiness and 'expiration' in self.stickiness: + self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time') + + elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0] + d = {} + for listener in elb_info.listeners: + d[listener[0]] = listener[2] + listeners_dict = d + + if self.stickiness['type'] == 'loadbalancer': + policy = [] + policy_type = 'LBCookieStickinessPolicyType' + + if self.module.boolean(self.stickiness['enabled']): + + if 'expiration' not in self.stickiness: + self.module.fail_json(msg='expiration must be set when type is loadbalancer') + + try: + expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None + except ValueError: + self.module.fail_json(msg='expiration must be set to an integer') + + policy_attrs = { + 'type': policy_type, + 'attr': 'lb_cookie_stickiness_policies', + 'method': 'create_lb_cookie_stickiness_policy', + 'dict_key': 'cookie_expiration_period', + 'param_value': expiration + } + policy.append(self._policy_name(policy_attrs['type'])) + + self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs) + elif not self.module.boolean(self.stickiness['enabled']): + if len(elb_info.policies.lb_cookie_stickiness_policies): + if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type): + self.changed = True + else: + self.changed = False + self._set_listener_policy(listeners_dict) + self._delete_policy(self.elb.name, self._policy_name(policy_type)) + + elif self.stickiness['type'] == 'application': + policy = [] + policy_type = 'AppCookieStickinessPolicyType' + if self.module.boolean(self.stickiness['enabled']): + + if 'cookie' not in self.stickiness: + self.module.fail_json(msg='cookie must be set when type is application') + + policy_attrs = { + 'type': policy_type, + 'attr': 
+                    policy_attrs = {
+                        'type': policy_type,
+                        'attr': 'app_cookie_stickiness_policies',
+                        'method': 'create_app_cookie_stickiness_policy',
+                        'dict_key': 'cookie_name',
+                        'param_value': self.stickiness['cookie']
+                    }
+                    policy.append(self._policy_name(policy_attrs['type']))
+                    self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
+                elif not self.module.boolean(self.stickiness['enabled']):
+                    if len(elb_info.policies.app_cookie_stickiness_policies):
+                        if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
+                            self.changed = True
+                        self._set_listener_policy(listeners_dict)
+                        self._delete_policy(self.elb.name, self._policy_name(policy_type))
+
+            else:
+                self._set_listener_policy(listeners_dict)
+
+    def _get_backend_policies(self):
+        """Get a list of backend policies"""
+        policies = []
+        if self.elb.backends is not None:
+            for backend in self.elb.backends:
+                if backend.policies is not None:
+                    for policy in backend.policies:
+                        policies.append(str(backend.instance_port) + ':' + policy.policy_name)
+
+        return policies
+
+    def _set_backend_policies(self):
+        """Sets policies for all backends"""
+        ensure_proxy_protocol = False
+        replace = []
+        backend_policies = self._get_backend_policies()
+
+        # Find out what needs to be changed
+        for listener in self.listeners:
+            want = False
+
+            if 'proxy_protocol' in listener and listener['proxy_protocol']:
+                ensure_proxy_protocol = True
+                want = True
+
+            if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
+                if not want:
+                    replace.append({'port': listener['instance_port'], 'policies': []})
+            elif want:
+                replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
+
+        # enable or disable proxy protocol
+        if ensure_proxy_protocol:
+            self._set_proxy_protocol_policy()
+
+        # Apply the backend policy changes
+        for item in replace:
+            self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
+            self.changed = True
+
+    def _get_proxy_protocol_policy(self):
+        """Find out if the elb has a proxy protocol enabled"""
+        if self.elb.policies is not None and self.elb.policies.other_policies is not None:
+            for policy in self.elb.policies.other_policies:
+                if policy.policy_name == 'ProxyProtocol-policy':
+                    return policy.policy_name
+
+        return None
+
+    def _set_proxy_protocol_policy(self):
+        """Install a proxy protocol policy if needed"""
+        proxy_policy = self._get_proxy_protocol_policy()
+
+        if proxy_policy is None:
+            self.elb_conn.create_lb_policy(
+                self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
+            )
+            self.changed = True
+
+        # TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
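+        # (leaving the policy defined on the ELB is harmless: it only takes effect
+        # on the instance ports that _set_backend_policies assigns it to)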
+
+    def _diff_list(self, a, b):
+        """Find the entries in list a that are not in list b"""
+        b = set(b)
+        return [aa for aa in a if aa not in b]
+
+    def _get_instance_ids(self):
+        """Get the current list of instance ids installed in the elb"""
+        instances = []
+        if self.elb.instances is not None:
+            for instance in self.elb.instances:
+                instances.append(instance.id)
+
+        return instances
+
+    def _set_instance_ids(self):
+        """Register or deregister instances with the load balancer"""
+        assert_instances = self.instance_ids or []
+
+        has_instances = self._get_instance_ids()
+
+        add_instances = self._diff_list(assert_instances, has_instances)
+        if add_instances:
+            self.elb_conn.register_instances(self.elb.name, add_instances)
+            self.changed = True
+
+        if self.purge_instance_ids:
+            remove_instances = self._diff_list(has_instances, assert_instances)
+            if remove_instances:
+                self.elb_conn.deregister_instances(self.elb.name, remove_instances)
+                self.changed = True
+
+    def _set_tags(self):
+        """Add/Delete tags"""
+        if self.tags is None:
+            return
+
+        params = {'LoadBalancerNames.member.1': self.name}
+
+        tagdict = dict()
+
+        # get the current list of tags from the ELB, if ELB exists
+        if self.elb:
+            current_tags = self.elb_conn.get_list('DescribeTags', params,
+                                                  [('member', Tag)])
+            tagdict = dict((tag.Key, tag.Value) for tag in current_tags
+                           if hasattr(tag, 'Key'))
+
+        # Add missing tags
+        dictact = dict(set(self.tags.items()) - set(tagdict.items()))
+        if dictact:
+            for i, key in enumerate(dictact):
+                params['Tags.member.%d.Key' % (i + 1)] = key
+                params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
+
+            self.elb_conn.make_request('AddTags', params)
+            self.changed = True
+
+        # Remove extra tags
+        dictact = dict(set(tagdict.items()) - set(self.tags.items()))
+        if dictact:
+            for i, key in enumerate(dictact):
+                params['Tags.member.%d.Key' % (i + 1)] = key
+
+            self.elb_conn.make_request('RemoveTags', params)
+            self.changed = True
+
+    def _get_health_check_target(self):
+        """Compose target string from healthcheck parameters"""
+        protocol = self.health_check['ping_protocol'].upper()
+        path = ""
+
+        if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
+            path = self.health_check['ping_path']
+
+        return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state={'required': True, 'choices': ['present', 'absent']},
+        name={'required': True},
+        listeners={'default': None, 'required': False, 'type': 'list'},
+        purge_listeners={'default': True, 'required': False, 'type': 'bool'},
+        instance_ids={'default': None, 'required': False, 'type': 'list'},
+        purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
+        zones={'default': None, 'required': False, 'type': 'list'},
+        purge_zones={'default': False, 'required': False, 'type': 'bool'},
+        security_group_ids={'default': None, 'required': False, 'type': 'list'},
+        security_group_names={'default': None, 'required': False, 'type': 'list'},
+        health_check={'default': None, 'required': False, 'type': 'dict'},
+        subnets={'default': None, 'required': False, 'type': 'list'},
+        purge_subnets={'default': False, 'required': False, 'type': 'bool'},
+        scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']},
+        connection_draining_timeout={'default': None, 'required': False, 'type': 'int'},
+        idle_timeout={'default': None, 'type': 'int', 'required': False},
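+        # NOTE: cross-AZ load balancing, stickiness and access_logs map to ELB
+        # attributes/policies applied after creation (see _set_cross_az_load_balancing,
+        # select_stickiness_policy and _set_access_log above); main() below also
+        # checks that the installed boto release supports each attribute.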
+        cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False},
+        stickiness={'default': None, 'required': False, 'type': 'dict'},
+        access_logs={'default': None, 'required': False, 'type': 'dict'},
+        wait={'default': False, 'type': 'bool', 'required': False},
+        wait_timeout={'default': 60, 'type': 'int', 'required': False},
+        tags={'default': None, 'required': False, 'type': 'dict'}
+    )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[['security_group_ids', 'security_group_names']]
+    )
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto required for this module')
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+    if not region:
+        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+
+    name = module.params['name']
+    state = module.params['state']
+    listeners = module.params['listeners']
+    purge_listeners = module.params['purge_listeners']
+    instance_ids = module.params['instance_ids']
+    purge_instance_ids = module.params['purge_instance_ids']
+    zones = module.params['zones']
+    purge_zones = module.params['purge_zones']
+    security_group_ids = module.params['security_group_ids']
+    security_group_names = module.params['security_group_names']
+    health_check = module.params['health_check']
+    access_logs = module.params['access_logs']
+    subnets = module.params['subnets']
+    purge_subnets = module.params['purge_subnets']
+    scheme = module.params['scheme']
+    connection_draining_timeout = module.params['connection_draining_timeout']
+    idle_timeout = module.params['idle_timeout']
+    cross_az_load_balancing = module.params['cross_az_load_balancing']
+    stickiness = module.params['stickiness']
+    wait = module.params['wait']
+    wait_timeout = module.params['wait_timeout']
+    tags = module.params['tags']
+
+    if state == 'present' and not listeners:
+        module.fail_json(msg="At least one listener is required for ELB creation")
+
+    if state == 'present' and not (zones or subnets):
+        module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
+
+    if wait_timeout > 600:
+        module.fail_json(msg='wait_timeout maximum is 600 seconds')
+
+    if security_group_names:
+        security_group_ids = []
+        try:
+            ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
+            if subnets:  # We have at least one subnet, ergo this is a VPC
+                vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
+                vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
+                filters = {'vpc_id': vpc_id}
+            else:
+                filters = None
+            grp_details = ec2.get_all_security_groups(filters=filters)
+
+            for group_name in security_group_names:
+                if isinstance(group_name, string_types):
+                    group_name = [group_name]
+
+                group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
+                security_group_ids.extend(group_id)
+        except boto.exception.NoAuthHandlerFound as e:
+            module.fail_json(msg=str(e))
+
+    elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
+                         purge_zones, security_group_ids, health_check,
+                         subnets, purge_subnets, scheme,
+                         connection_draining_timeout, idle_timeout,
+                         cross_az_load_balancing,
+                         access_logs, stickiness, wait, wait_timeout, tags,
+                         region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
+                         **aws_connect_params)
+
+    # check for unsupported attributes for this version of boto
+    if cross_az_load_balancing and not 
elb_man._check_attribute_support('cross_zone_load_balancing'): + module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute") + + if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'): + module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute") + + if idle_timeout and not elb_man._check_attribute_support('connecting_settings'): + module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute") + + if state == 'present': + elb_man.ensure_ok() + elif state == 'absent': + elb_man.ensure_gone() + + ansible_facts = {'ec2_elb': 'info'} + ec2_facts_result = dict(changed=elb_man.changed, + elb=elb_man.get_info(), + ansible_facts=ansible_facts) + + module.exit_json(**ec2_facts_result) + + +if __name__ == '__main__': + main() diff --git a/lib/ansible/modules/cloud/amazon/elb_classic_lb.py b/lib/ansible/modules/cloud/amazon/elb_classic_lb.py index 331661a813a..81c354cb883 100644 --- a/lib/ansible/modules/cloud/amazon/elb_classic_lb.py +++ b/lib/ansible/modules/cloud/amazon/elb_classic_lb.py @@ -7,7 +7,7 @@ __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], + 'status': ['preview'], 'supported_by': 'certified'} @@ -170,8 +170,7 @@ EXAMPLES = """ # Basic provisioning example (non-VPC) -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "test-please-delete" state: present zones: @@ -188,11 +187,11 @@ EXAMPLES = """ instance_port: 80 # ssl certificate required for https or ssl ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" + delegate_to: localhost # Internal ELB example -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "test-vpc" scheme: internal state: present @@ -206,10 +205,10 @@ EXAMPLES = """ - protocol: http # options are http, https, ssl, tcp load_balancer_port: 80 instance_port: 80 + delegate_to: localhost # Configure a health check and the access logs -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "test-please-delete" state: present zones: @@ -230,33 +229,33 @@ EXAMPLES = """ interval: 5 # minutes (defaults to 60) s3_location: "my-bucket" # This value is required if access_logs is set s3_prefix: "logs" + delegate_to: localhost # Ensure ELB is gone -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "test-please-delete" state: absent + delegate_to: localhost # Ensure ELB is gone and wait for check (for default timeout) -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "test-please-delete" state: absent wait: yes + delegate_to: localhost # Ensure ELB is gone and wait for check with timeout value -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "test-please-delete" state: absent wait: yes wait_timeout: 600 + delegate_to: localhost # Normally, this module will purge any listeners that exist on the ELB # but aren't specified in the listeners parameter. If purge_listeners is # false it leaves them alone -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "test-please-delete" state: present zones: @@ -267,12 +266,12 @@ EXAMPLES = """ load_balancer_port: 80 instance_port: 80 purge_listeners: no + delegate_to: localhost # Normally, this module will leave availability zones that are enabled # on the ELB alone. 
If purge_zones is true, then any extraneous zones # will be removed -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "test-please-delete" state: present zones: @@ -283,10 +282,10 @@ EXAMPLES = """ load_balancer_port: 80 instance_port: 80 purge_zones: yes + delegate_to: localhost # Creates a ELB and assigns a list of subnets to it. -- local_action: - module: ec2_elb_lb +- elb_classic_lb: state: present name: 'New ELB' security_group_ids: 'sg-123456, sg-67890' @@ -297,11 +296,11 @@ EXAMPLES = """ - protocol: http load_balancer_port: 80 instance_port: 80 + delegate_to: localhost # Create an ELB with connection draining, increased idle timeout and cross availability # zone load balancing -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "New ELB" state: present connection_draining_timeout: 60 @@ -315,10 +314,10 @@ EXAMPLES = """ - protocol: http load_balancer_port: 80 instance_port: 80 + delegate_to: localhost # Create an ELB with load balancer stickiness enabled -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "New ELB" state: present region: us-east-1 @@ -333,10 +332,10 @@ EXAMPLES = """ type: loadbalancer enabled: yes expiration: 300 + delegate_to: localhost # Create an ELB with application stickiness enabled -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "New ELB" state: present region: us-east-1 @@ -351,10 +350,10 @@ EXAMPLES = """ type: application enabled: yes cookie: SESSIONID + delegate_to: localhost # Create an ELB and add tags -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "New ELB" state: present region: us-east-1 @@ -369,10 +368,10 @@ EXAMPLES = """ Name: "New ELB" stack: "production" client: "Bob" + delegate_to: localhost # Delete all tags from an ELB -- local_action: - module: ec2_elb_lb +- elb_classic_lb: name: "New ELB" state: present region: us-east-1 @@ -384,6 +383,7 @@ EXAMPLES = """ load_balancer_port: 80 instance_port: 80 tags: {} + delegate_to: localhost """ import random diff --git a/lib/ansible/modules/cloud/amazon/elb_classic_lb_facts.py b/lib/ansible/modules/cloud/amazon/elb_classic_lb_facts.py index 0ba9ab89b88..036af42a166 100644 --- a/lib/ansible/modules/cloud/amazon/elb_classic_lb_facts.py +++ b/lib/ansible/modules/cloud/amazon/elb_classic_lb_facts.py @@ -45,35 +45,29 @@ EXAMPLES = ''' # Output format tries to match ec2_elb_lb module input parameters # Gather facts about all ELBs -- action: - module: ec2_elb_facts +- elb_classic_lb_facts: register: elb_facts -- action: - module: debug +- debug: msg: "{{ item.dns_name }}" with_items: "{{ elb_facts.elbs }}" # Gather facts about a particular ELB -- action: - module: ec2_elb_facts +- elb_classic_lb_facts: names: frontend-prod-elb register: elb_facts -- action: - module: debug +- debug: msg: "{{ elb_facts.elbs.0.dns_name }}" # Gather facts about a set of ELBs -- action: - module: ec2_elb_facts +- elb_classic_lb_facts: names: - frontend-prod-elb - backend-prod-elb register: elb_facts -- action: - module: debug +- debug: msg: "{{ item.dns_name }}" with_items: "{{ elb_facts.elbs }}" @@ -208,7 +202,6 @@ class ElbInformation(object): elb_info['instances_inservice_percent'] = 0. 
return elb_info
 
-
     def list_elbs(self):
         elb_array, token = [], None
         get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers)
diff --git a/lib/ansible/modules/cloud/amazon/elb_instance.py b/lib/ansible/modules/cloud/amazon/elb_instance.py
index bf0694a6de9..dea3a00c14f 100644
--- a/lib/ansible/modules/cloud/amazon/elb_instance.py
+++ b/lib/ansible/modules/cloud/amazon/elb_instance.py
@@ -7,7 +7,7 @@ __metaclass__ = type
 
 
 ANSIBLE_METADATA = {'metadata_version': '1.1',
-                    'status': ['stableinterface'],
+                    'status': ['preview'],
                     'supported_by': 'certified'}
 
 
@@ -77,19 +77,19 @@ pre_tasks:
   - name: Gathering ec2 facts
     action: ec2_facts
   - name: Instance De-register
-    local_action:
-      module: ec2_elb
+    elb_instance:
       instance_id: "{{ ansible_ec2_instance_id }}"
       state: absent
+    delegate_to: localhost
 roles:
   - myrole
 post_tasks:
   - name: Instance Register
-    local_action:
-      module: ec2_elb
+    elb_instance:
       instance_id: "{{ ansible_ec2_instance_id }}"
       ec2_elbs: "{{ item }}"
       state: present
+    delegate_to: localhost
     with_items: "{{ ec2_elbs }}"
 """
 
diff --git a/test/integration/targets/ec2_elb_lb/aliases b/test/integration/targets/ec2_elb_lb/aliases
new file mode 100644
index 00000000000..ebdf4aa5720
--- /dev/null
+++ b/test/integration/targets/ec2_elb_lb/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+posix/ci/cloud/group1/aws
diff --git a/test/integration/targets/ec2_elb_lb/defaults/main.yml b/test/integration/targets/ec2_elb_lb/defaults/main.yml
new file mode 100644
index 00000000000..76164523d46
--- /dev/null
+++ b/test/integration/targets/ec2_elb_lb/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# defaults file for test_ec2_elb_lb
+tag_prefix: '{{resource_prefix}}'
diff --git a/test/integration/targets/ec2_elb_lb/meta/main.yml b/test/integration/targets/ec2_elb_lb/meta/main.yml
new file mode 100644
index 00000000000..1f64f1169a9
--- /dev/null
+++ b/test/integration/targets/ec2_elb_lb/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+  - prepare_tests
+  - setup_ec2
diff --git a/test/integration/targets/ec2_elb_lb/tasks/main.yml b/test/integration/targets/ec2_elb_lb/tasks/main.yml
new file mode 100644
index 00000000000..728fef7b51f
--- /dev/null
+++ b/test/integration/targets/ec2_elb_lb/tasks/main.yml
@@ -0,0 +1,419 @@
+---
+# __Test Info__
+# Create a self-signed cert and upload it to AWS
+# http://www.akadia.com/services/ssh_test_certificate.html
+# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/ssl-server-cert.html
+
+# __Test Outline__
+#
+# __ec2_elb_lb__
+# create test elb with listeners and certificate
+# change AZs
+# change listeners
+# remove listeners
+# remove elb
+
+# __ec2-common__
+# test environment variable EC2_REGION
+# test with no parameters
+# test with only name
+# test invalid region parameter
+# test valid region parameter
+# test invalid ec2_url parameter
+# test valid ec2_url parameter
+# test credentials from environment
+# test credential parameters
+
+- block:
+
+  # ============================================================
+  # create test elb with listeners, certificate, and health check
+
+  - name: Create ELB
+    ec2_elb_lb:
+      name: "{{ tag_prefix }}"
+      region: "{{ ec2_region }}"
+      ec2_access_key: "{{ ec2_access_key }}"
+      ec2_secret_key: "{{ ec2_secret_key }}"
+      security_token: "{{ security_token }}"
+      state: present
+      zones:
+        - us-east-1c
+        - us-east-1d
+      listeners:
+        - protocol: http
+          load_balancer_port: 80
+          instance_port: 80
+        - protocol: http
+          load_balancer_port: 8080
+          instance_port: 8080
+      health_check:
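+        # ec2_elb_lb composes these values into the health-check target string
+        # "HTTP:80/index.html"; the assert below checks it via
+        # info.elb.health_check.target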
+        ping_protocol: http
+        ping_port: 80
+        ping_path: "/index.html"
+        response_timeout: 5
+        interval: 30
+        unhealthy_threshold: 2
+        healthy_threshold: 10
+    register: info
+
+  - assert:
+      that:
+        - 'info.changed'
+        - 'info.elb.status == "created"'
+        - '"us-east-1c" in info.elb.zones'
+        - '"us-east-1d" in info.elb.zones'
+        - 'info.elb.health_check.healthy_threshold == 10'
+        - 'info.elb.health_check.interval == 30'
+        - 'info.elb.health_check.target == "HTTP:80/index.html"'
+        - 'info.elb.health_check.timeout == 5'
+        - 'info.elb.health_check.unhealthy_threshold == 2'
+        - '[80, 80, "HTTP", "HTTP"] in info.elb.listeners'
+        - '[8080, 8080, "HTTP", "HTTP"] in info.elb.listeners'
+
+  # ============================================================
+
+  # check ports, would be cool, but we are at the mercy of AWS
+  # to start things in a timely manner
+
+  #- name: check to make sure 80 is listening
+  #  wait_for: host={{ info.elb.dns_name }} port=80 timeout=600
+  #  register: result
+
+  #- name: assert can connect to port#
+  #  assert: 'result.state == "started"'
+
+  #- name: check to make sure 443 is listening
+  #  wait_for: host={{ info.elb.dns_name }} port=443 timeout=600
+  #  register: result
+
+  #- name: assert can connect to port#
+  #  assert: 'result.state == "started"'
+
+  # ============================================================
+
+  # Change AZs
+
+  - name: Change AZs
+    ec2_elb_lb:
+      name: "{{ tag_prefix }}"
+      region: "{{ ec2_region }}"
+      ec2_access_key: "{{ ec2_access_key }}"
+      ec2_secret_key: "{{ ec2_secret_key }}"
+      security_token: "{{ security_token }}"
+      state: present
+      zones:
+        - us-east-1b
+      listeners:
+        - protocol: http
+          load_balancer_port: 80
+          instance_port: 80
+      purge_zones: yes
+      health_check:
+        ping_protocol: http
+        ping_port: 80
+        ping_path: "/index.html"
+        response_timeout: 5
+        interval: 30
+        unhealthy_threshold: 2
+        healthy_threshold: 10
+    register: info
+
+  - assert:
+      that:
+        - 'info.elb.status == "ok"'
+        - 'info.changed'
+        - 'info.elb.zones[0] == "us-east-1b"'
+
+  # ============================================================
+
+  # Update AZs
+
+  - name: Update AZs
+    ec2_elb_lb:
+      name: "{{ tag_prefix }}"
+      region: "{{ ec2_region }}"
+      ec2_access_key: "{{ ec2_access_key }}"
+      ec2_secret_key: "{{ ec2_secret_key }}"
+      security_token: "{{ security_token }}"
+      state: present
+      zones:
+        - us-east-1b
+        - us-east-1c
+        - us-east-1d
+      listeners:
+        - protocol: http
+          load_balancer_port: 80
+          instance_port: 80
+      purge_zones: yes
+    register: info
+
+  - assert:
+      that:
+        - 'info.changed'
+        - 'info.elb.status == "ok"'
+        - '"us-east-1b" in info.elb.zones'
+        - '"us-east-1c" in info.elb.zones'
+        - '"us-east-1d" in info.elb.zones'
+
+  # ============================================================
+
+  # Purge Listeners
+
+  - name: Purge Listeners
+    ec2_elb_lb:
+      name: "{{ tag_prefix }}"
+      region: "{{ ec2_region }}"
+      ec2_access_key: "{{ ec2_access_key }}"
+      ec2_secret_key: "{{ ec2_secret_key }}"
+      security_token: "{{ security_token }}"
+      state: present
+      zones:
+        - us-east-1b
+        - us-east-1c
+        - us-east-1d
+      listeners:
+        - protocol: http
+          load_balancer_port: 80
+          instance_port: 81
+      purge_listeners: yes
+    register: info
+
+  - assert:
+      that:
+        - 'info.elb.status == "ok"'
+        - 'info.changed'
+        - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
+        - 'info.elb.listeners|length == 1'
+
+  # ============================================================
+
+  # Add Listeners
+
+  - name: Add Listeners
+    ec2_elb_lb:
+      name: "{{ tag_prefix }}"
+      region: "{{ ec2_region }}"
+      ec2_access_key: "{{ 
ec2_access_key }}" + ec2_secret_key: "{{ ec2_secret_key }}" + security_token: "{{ security_token }}" + state: present + zones: + - us-east-1b + - us-east-1c + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 8081 + instance_port: 8081 + purge_listeners: no + register: info + + - assert: + that: + - 'info.elb.status == "ok"' + - 'info.changed' + - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners' + - '[8081, 8081, "HTTP", "HTTP"] in info.elb.listeners' + - 'info.elb.listeners|length == 2' + + + # ============================================================ + + - name: test with no parameters + ec2_elb_lb: + register: result + ignore_errors: true + + - name: assert failure when called with no parameters + assert: + that: + - 'result.failed' + - 'result.msg.startswith("missing required arguments: ")' + + + + # ============================================================ + - name: test with only name + ec2_elb_lb: + name="{{ tag_prefix }}" + register: result + ignore_errors: true + + - name: assert failure when called with only name + assert: + that: + - 'result.failed' + - 'result.msg == "missing required arguments: state"' + + + # ============================================================ + - name: test invalid region parameter + ec2_elb_lb: + name: "{{ tag_prefix }}" + region: 'asdf querty 1234' + state: present + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + zones: + - us-east-1c + - us-east-1d + register: result + ignore_errors: true + + - name: assert invalid region parameter + assert: + that: + - 'result.failed' + - 'result.msg.startswith("Region asdf querty 1234 does not seem to be available ")' + + + # ============================================================ + - name: test valid region parameter + ec2_elb_lb: + name: "{{ tag_prefix }}" + region: "{{ ec2_region }}" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + + register: result + ignore_errors: true + + - name: assert valid region parameter + assert: + that: + - 'result.failed' + - 'result.msg.startswith("No handler was ready to authenticate.")' + + + # ============================================================ + + - name: test invalid ec2_url parameter + ec2_elb_lb: + name: "{{ tag_prefix }}" + region: "{{ ec2_region }}" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + environment: + EC2_URL: bogus.example.com + register: result + ignore_errors: true + + - name: assert invalid ec2_url parameter + assert: + that: + - 'result.failed' + - 'result.msg.startswith("No handler was ready to authenticate.")' + + + # ============================================================ + - name: test valid ec2_url parameter + ec2_elb_lb: + name: "{{ tag_prefix }}" + region: "{{ ec2_region }}" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + environment: + EC2_URL: '{{ec2_url}}' + register: result + ignore_errors: true + + - name: assert valid ec2_url parameter + assert: + that: + - 'result.failed' + - 'result.msg.startswith("No handler was ready to authenticate.")' + + + # ============================================================ + - name: test credentials from environment + ec2_elb_lb: + name: "{{ tag_prefix }}" + region: "{{ ec2_region }}" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + 
load_balancer_port: 80 + instance_port: 80 + environment: + EC2_ACCESS_KEY: bogus_access_key + EC2_SECRET_KEY: bogus_secret_key + register: result + ignore_errors: true + + - name: assert credentials from environment + assert: + that: + - 'result.failed' + - '"InvalidClientTokenId" in result.exception' + + + # ============================================================ + - name: test credential parameters + ec2_elb_lb: + name: "{{ tag_prefix }}" + region: "{{ ec2_region }}" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + register: result + ignore_errors: true + + - name: assert credential parameters + assert: + that: + - 'result.failed' + - '"No handler was ready to authenticate. 1 handlers were checked." in result.msg' + + always: + + # ============================================================ + - name: remove the test load balancer completely + ec2_elb_lb: + name: "{{ tag_prefix }}" + region: "{{ ec2_region }}" + state: absent + ec2_access_key: "{{ ec2_access_key }}" + ec2_secret_key: "{{ ec2_secret_key }}" + security_token: "{{ security_token }}" + register: result + + - name: assert the load balancer was removed + assert: + that: + - 'result.changed' + - 'result.elb.name == "{{tag_prefix}}"' + - 'result.elb.status == "deleted"' diff --git a/test/integration/targets/ec2_elb_lb/vars/main.yml b/test/integration/targets/ec2_elb_lb/vars/main.yml new file mode 100644 index 00000000000..79194af1ef5 --- /dev/null +++ b/test/integration/targets/ec2_elb_lb/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for test_ec2_elb_lb diff --git a/test/integration/targets/elb_classic_lb/tasks/main.yml b/test/integration/targets/elb_classic_lb/tasks/main.yml index 728fef7b51f..d66661c1284 100644 --- a/test/integration/targets/elb_classic_lb/tasks/main.yml +++ b/test/integration/targets/elb_classic_lb/tasks/main.yml @@ -6,7 +6,7 @@ # __Test Outline__ # -# __ec2_elb_lb__ +# __elb_classic_lb__ # create test elb with listeners and certificate # change AZ's # change listeners @@ -30,7 +30,7 @@ # create test elb with listeners, certificate, and health check - name: Create ELB - ec2_elb_lb: + elb_classic_lb: name: "{{ tag_prefix }}" region: "{{ ec2_region }}" ec2_access_key: "{{ ec2_access_key }}" @@ -95,7 +95,7 @@ # Change AZ's - name: Change AZ's - ec2_elb_lb: + elb_classic_lb: name: "{{ tag_prefix }}" region: "{{ ec2_region }}" ec2_access_key: "{{ ec2_access_key }}" @@ -132,7 +132,7 @@ # Update AZ's - name: Update AZ's - ec2_elb_lb: + elb_classic_lb: name: "{{ tag_prefix }}" region: "{{ ec2_region }}" ec2_access_key: "{{ ec2_access_key }}" @@ -164,7 +164,7 @@ # Purge Listeners - name: Purge Listeners - ec2_elb_lb: + elb_classic_lb: name: "{{ tag_prefix }}" region: "{{ ec2_region }}" ec2_access_key: "{{ ec2_access_key }}" @@ -196,7 +196,7 @@ # add Listeners - name: Add Listeners - ec2_elb_lb: + elb_classic_lb: name: "{{ tag_prefix }}" region: "{{ ec2_region }}" ec2_access_key: "{{ ec2_access_key }}" @@ -226,7 +226,7 @@ # ============================================================ - name: test with no parameters - ec2_elb_lb: + elb_classic_lb: register: result ignore_errors: true @@ -240,7 +240,7 @@ # ============================================================ - name: test with only name - ec2_elb_lb: + elb_classic_lb: name="{{ tag_prefix }}" register: result ignore_errors: true @@ -254,7 +254,7 @@ # ============================================================ - name: test invalid region parameter - ec2_elb_lb: + 
elb_classic_lb: name: "{{ tag_prefix }}" region: 'asdf querty 1234' state: present @@ -277,7 +277,7 @@ # ============================================================ - name: test valid region parameter - ec2_elb_lb: + elb_classic_lb: name: "{{ tag_prefix }}" region: "{{ ec2_region }}" state: present @@ -302,7 +302,7 @@ # ============================================================ - name: test invalid ec2_url parameter - ec2_elb_lb: + elb_classic_lb: name: "{{ tag_prefix }}" region: "{{ ec2_region }}" state: present @@ -327,7 +327,7 @@ # ============================================================ - name: test valid ec2_url parameter - ec2_elb_lb: + elb_classic_lb: name: "{{ tag_prefix }}" region: "{{ ec2_region }}" state: present @@ -352,7 +352,7 @@ # ============================================================ - name: test credentials from environment - ec2_elb_lb: + elb_classic_lb: name: "{{ tag_prefix }}" region: "{{ ec2_region }}" state: present @@ -378,7 +378,7 @@ # ============================================================ - name: test credential parameters - ec2_elb_lb: + elb_classic_lb: name: "{{ tag_prefix }}" region: "{{ ec2_region }}" state: present @@ -402,7 +402,7 @@ # ============================================================ - name: remove the test load balancer completely - ec2_elb_lb: + elb_classic_lb: name: "{{ tag_prefix }}" region: "{{ ec2_region }}" state: absent diff --git a/test/sanity/pep8/legacy-files.txt b/test/sanity/pep8/legacy-files.txt index 026b55f9c5c..4dc282a8af5 100644 --- a/test/sanity/pep8/legacy-files.txt +++ b/test/sanity/pep8/legacy-files.txt @@ -4,9 +4,8 @@ lib/ansible/config/data.py lib/ansible/config/manager.py lib/ansible/modules/cloud/amazon/_ec2_ami_search.py lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py -lib/ansible/modules/cloud/amazon/_ec2_elb.py -lib/ansible/modules/cloud/amazon/_ec2_elb_facts.py -lib/ansible/modules/cloud/amazon/_ec2_elb_lb.py +lib/ansible/modules/cloud/amazon/ec2_elb.py +lib/ansible/modules/cloud/amazon/ec2_elb_lb.py lib/ansible/modules/cloud/amazon/_ec2_vpc.py lib/ansible/modules/cloud/amazon/_ec2_vpc_dhcp_options.py lib/ansible/modules/cloud/openstack/_os_server_actions.py @@ -46,7 +45,6 @@ lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py lib/ansible/modules/cloud/amazon/efs.py lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py lib/ansible/modules/cloud/amazon/elb_instance.py -lib/ansible/modules/cloud/amazon/elb_classic_lb_facts.py lib/ansible/modules/cloud/amazon/elb_classic_lb.py lib/ansible/modules/cloud/amazon/execute_lambda.py lib/ansible/modules/cloud/amazon/iam.py