Adding waiter to cluster remove process (#57324)

* Adding waiter to cluster remove process

* blank line contains whitespace

* update aws_eks integration test

* Refactor aws_eks test suite to use pip

* update version testing

* missing parens...

* add changelog fragment

* Add waiter to module_utils, fix exception handling.

* Correct EKS waiter checks
Authored by Shaun M 5 years ago; committed by Will Thames
parent 2d98734ad5
commit fa783c027b

@@ -0,0 +1,3 @@
---
minor_changes:
- aws_eks_cluster - Ansible may now wait until an EKS cluster is fully removed before moving on.

@@ -199,6 +199,24 @@ eks_data = {
"expected": "ResourceNotFoundException"
}
]
},
"ClusterDeleted": {
"delay": 20,
"maxAttempts": 60,
"operation": "DescribeCluster",
"acceptors": [
{
"state": "retry",
"matcher": "path",
"argument": "cluster.status != 'DELETED'",
"expected": True
},
{
"state": "success",
"matcher": "error",
"expected": "ResourceNotFoundException"
}
]
}
}
}
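Botocore waiters are data-driven: the ClusterDeleted model above tells botocore to call DescribeCluster every 20 seconds, retry while the JMESPath expression cluster.status != 'DELETED' evaluates to true, and treat a ResourceNotFoundException as success, since a fully deleted cluster no longer resolves at all. A minimal standalone sketch of how such a model is consumed (client construction and the cluster name are illustrative, not part of this patch):

import boto3
import botocore.waiter as core_waiter

waiter_config = {
    "version": 2,
    "waiters": {
        "ClusterDeleted": {
            "delay": 20,           # seconds between polls
            "maxAttempts": 60,     # 60 polls * 20s = 20 minute ceiling
            "operation": "DescribeCluster",
            "acceptors": [
                # keep polling while the cluster still reports a status
                {"state": "retry", "matcher": "path",
                 "argument": "cluster.status != 'DELETED'", "expected": True},
                # the cluster disappearing entirely counts as success
                {"state": "success", "matcher": "error",
                 "expected": "ResourceNotFoundException"},
            ],
        }
    },
}

eks = boto3.client("eks")
model = core_waiter.WaiterModel(waiter_config)
waiter = core_waiter.create_waiter_with_client("ClusterDeleted", model, eks)
waiter.wait(name="my_cluster")   # blocks until deleted or raises WaiterError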
@@ -317,6 +335,12 @@ waiters_by_name = {
core_waiter.NormalizedOperationMethod(
eks.describe_cluster
)),
('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter(
'cluster_deleted',
eks_model('ClusterDeleted'),
core_waiter.NormalizedOperationMethod(
eks.describe_cluster
)),
('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
'db_instance_stopped',
rds_model('DBInstanceStopped'),
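The waiters_by_name registry keys each entry on a (client class name, waiter name) tuple and builds the Waiter lazily around the bound client method. A rough sketch of the lookup helper that modules call (this approximates the existing get_waiter in this file rather than quoting it exactly):

def get_waiter(client, waiter_name):
    # boto3.client('eks').__class__.__name__ == 'EKS', matching the tuple keys
    try:
        return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
    except KeyError:
        raise NotImplementedError("Waiter %s could not be found for client %s"
                                  % (waiter_name, type(client)))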

@@ -38,8 +38,8 @@ options:
default: present
wait:
description: >-
- Specifies whether the module waits until the cluster becomes active after
- creation. It takes "usually less than 10 minutes" per AWS documentation.
+ Specifies whether the module waits until the cluster is active or deleted
+ before moving on. It takes "usually less than 10 minutes" per AWS documentation.
type: bool
default: 'no'
wait_timeout:
@@ -73,6 +73,7 @@ EXAMPLES = '''
- name: Remove an EKS cluster
aws_eks_cluster:
name: my_cluster
+ wait: yes
state: absent
'''
@@ -183,7 +184,7 @@ def ensure_present(client, module):
module.fail_json(msg="Cannot modify version of existing cluster")
if wait:
- wait_until_cluster_active(client, module)
+ wait_until(client, module, 'cluster_active')
# Ensure that fields that are only available for active clusters are
# included in the returned value
cluster = get_cluster(client, module)
@@ -208,7 +209,7 @@ def ensure_present(client, module):
module.fail_json_aws(e, msg="Couldn't create cluster %s" % name)
if wait:
- wait_until_cluster_active(client, module)
+ wait_until(client, module, 'cluster_active')
# Ensure that fields that are only available for active clusters are
# included in the returned value
cluster = get_cluster(client, module)
@@ -219,6 +220,7 @@ def ensure_present(client, module):
def ensure_absent(client, module):
name = module.params.get('name')
existing = get_cluster(client, module)
+ wait = module.params.get('wait')
if not existing:
module.exit_json(changed=False)
if not module.check_mode:
@@ -228,6 +230,10 @@ def ensure_absent(client, module):
module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Couldn't delete cluster %s" % name)
+ if wait:
+ wait_until(client, module, 'cluster_deleted')
module.exit_json(changed=True)
@@ -240,14 +246,14 @@ def get_cluster(client, module):
except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except
module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json(e, msg="Couldn't get cluster %s" % name)
+ module.fail_json_aws(e, msg="Couldn't get cluster %s" % name)
- def wait_until_cluster_active(client, module):
+ def wait_until(client, module, waiter_name='cluster_active'):
name = module.params.get('name')
wait_timeout = module.params.get('wait_timeout')
- waiter = get_waiter(client, 'cluster_active')
+ waiter = get_waiter(client, waiter_name)
attempts = 1 + int(wait_timeout / waiter.config.delay)
waiter.wait(name=name, WaiterConfig={'MaxAttempts': attempts})
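The attempt count above converts the user-facing wait_timeout (seconds) into a number of polls, so a single waiter model serves any timeout. A worked example, assuming the module's default wait_timeout of 1200 seconds (the numbers are illustrative):

# delay comes from the waiter model: 20 seconds for ClusterDeleted
attempts = 1 + int(1200 / 20)   # = 61
# 61 polls spaced 20 seconds apart roughly covers the 1200-second budget;
# the extra attempt accounts for the immediate first DescribeCluster call.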
@@ -271,7 +277,12 @@ def main():
)
if not module.botocore_at_least("1.10.32"):
- module.fail_json(msg="aws_eks_cluster module requires botocore >= 1.10.32")
+ module.fail_json(msg='aws_eks_cluster module requires botocore >= 1.10.32')
+ if (not module.botocore_at_least("1.12.38") and
+ module.params.get('state') == 'absent' and
+ module.params.get('wait')):
+ module.fail_json(msg='aws_eks_cluster: wait=yes when state=absent requires botocore >= 1.12.38')
client = module.client('eks')
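This guard fails fast when wait=yes is combined with state=absent on a botocore too old to support polling for deletion, rather than failing partway through a run. A self-contained sketch of the same check (assuming module.botocore_at_least compares LooseVersions, which is not shown in this patch):

from distutils.version import LooseVersion
import botocore

def botocore_at_least(minimum_version):
    # assumed semantics of the AnsibleAWSModule helper
    return LooseVersion(botocore.__version__) >= LooseVersion(minimum_version)

state, wait = 'absent', True    # illustrative module parameters
if state == 'absent' and wait and not botocore_at_least('1.12.38'):
    raise SystemExit('wait=yes when state=absent requires botocore >= 1.12.38')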

@@ -0,0 +1,2 @@
dependencies:
- setup_remote_tmp_dir

@@ -1,6 +0,0 @@
- hosts: localhost
connection: local
environment: "{{ ansible_test.environment }}"
roles:
- aws_eks

@@ -1,17 +0,0 @@
- hosts: localhost
connection: local
environment: "{{ ansible_test.environment }}"
tasks:
- name: try and use aws_eks_cluster module
aws_eks_cluster:
state: absent
name: my_cluster
ignore_errors: yes
register: aws_eks_cluster
- name: ensure that aws_eks fails with friendly error message
assert:
that:
- '"msg" in aws_eks_cluster'
- aws_eks_cluster is failed

@@ -1,13 +0,0 @@
#!/usr/bin/env bash
set -eux
# Test graceful failure for older versions of botocore
source virtualenv.sh
pip install 'botocore<1.10.0' boto3
ansible-playbook -i ../../inventory -e @../../integration_config.yml -v playbooks/old_version.yml "$@"
# Run full test suite
source virtualenv.sh
pip install 'botocore>=1.10.1' boto3
ansible-playbook -i ../../inventory -e @../../integration_config.yml -v playbooks/full_test.yml "$@"

@@ -0,0 +1,12 @@
- name: try and use aws_eks_cluster module
aws_eks_cluster:
state: absent
name: my_cluster
ignore_errors: yes
register: aws_eks_cluster
- name: ensure that aws_eks fails with friendly error message
assert:
that:
- '"msg" in aws_eks_cluster'
- aws_eks_cluster is failed

@@ -0,0 +1,13 @@
- name: try using aws_eks_cluster wait with state=absent
aws_eks_cluster:
state: absent
name: my_cluster
wait: yes
ignore_errors: yes
register: aws_eks_cluster
- name: ensure that aws_eks fails with friendly error message
assert:
that:
- '"msg" in aws_eks_cluster'
- aws_eks_cluster is failed

@@ -2,9 +2,7 @@
# tasks file for aws_eks modules
- block:
- # FIXME: ap-south-1 only has two AZs, ap-south-1a and ap-south-1b
- # That makes it my best guess as to it being among the last to support EKS
- # If it does become supported, change this test to use an unsupported region
+ # If us-west-1 does become supported, change this test to use an unsupported region
# or if all regions are supported, delete this test
- name: attempt to use eks in unsupported region
aws_eks_cluster:
@@ -13,7 +11,7 @@
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
- region: ap-south-1
+ region: us-west-1
register: aws_eks_unsupported_region
ignore_errors: yes
@@ -142,14 +140,43 @@
- eks_create is not changed
- eks_create.name == eks_cluster_name
- - name: remove EKS cluster
+ - name: remove EKS cluster, waiting until complete
aws_eks_cluster:
name: "{{ eks_cluster_name }}"
state: absent
+ wait: yes
<<: *aws_connection_info
register: eks_delete
- name: check that EKS cluster was removed
assert:
that:
- eks_delete is changed
- name: create EKS cluster with same details but wait for it to become active
aws_eks_cluster:
name: "{{ eks_cluster_name }}"
security_groups: "{{ eks_security_groups | json_query('[].name') }}"
subnets: "{{ setup_subnets.results | json_query('[].subnet.id') }}"
role_arn: "{{ iam_role.arn }}"
wait: yes
<<: *aws_connection_info
register: eks_create
- name: check that EKS cluster was created
assert:
that:
- eks_create is changed
- eks_create.name == eks_cluster_name
- name: remove EKS cluster, without waiting this time
aws_eks_cluster:
name: "{{ eks_cluster_name }}"
state: absent
<<: *aws_connection_info
register: eks_delete
- name: check that EKS cluster remove has started
assert:
that:
- eks_delete is changed
@@ -163,13 +190,11 @@
aws_eks_cluster:
name: "{{ eks_cluster_name }}"
state: absent
+ wait: yes
<<: *aws_connection_info
register: eks_delete
ignore_errors: yes
- - pause:
-     minutes: 5
- debug:
msg: "{{ eks_security_groups|reverse|list }}"
@@ -190,6 +215,7 @@
vpc_id: '{{ setup_vpc.vpc.id }}'
<<: *aws_connection_info
with_items: "{{ eks_security_groups }}"
+ ignore_errors: yes
- name: remove security groups
ec2_group:

@@ -0,0 +1,66 @@
- set_fact:
virtualenv: "{{ remote_tmp_dir }}/virtualenv"
virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
- set_fact:
virtualenv_interpreter: "{{ virtualenv }}/bin/python"
- pip:
name: virtualenv
# Test graceful failure for old botocore (< 1.10.1)
- pip:
name:
- 'botocore<1.10.1'
- boto3
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- include_tasks: botocore_lt_1.10.1.yml
vars:
ansible_python_interpreter: "{{ virtualenv_interpreter }}"
- file:
path: "{{ virtualenv }}"
state: absent
# Test graceful failures when botocore<1.12.38
- pip:
name:
- 'botocore>1.10.1,<1.12.38'
- boto3
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- include_tasks: botocore_lt_1.12.38.yml
vars:
ansible_python_interpreter: "{{ virtualenv_interpreter }}"
- file:
path: "{{ virtualenv }}"
state: absent
# Run the full test suite against a recent botocore
- pip:
name:
- 'botocore>=1.10.1'
- boto3
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- include_tasks: full_test.yml
vars:
ansible_python_interpreter: "{{ virtualenv_interpreter }}"
- file:
path: "{{ virtualenv }}"
state: absent