Migrate s3_bucket module to boto3 (#37189)

pull/38424/head
Julien Vey 7 years ago committed by Ryan Brown
parent 9bb1ee30bf
commit bab947d854
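For context on what the diff below does: the module's S3 access moves from boto connection/bucket objects to a boto3 client. A minimal sketch of the two styles (illustration only, with a placeholder bucket name; the module itself obtains its client through ansible.module_utils.ec2.boto3_conn, see get_s3_client further down):

import boto
import boto3

# Old style (boto), removed by this commit: a connection object with a bucket-centric API.
conn = boto.connect_s3()
bucket = conn.get_bucket('example-bucket')
bucket.get_versioning_status()

# New style (boto3), introduced by this commit: a low-level client, one call per operation.
s3_client = boto3.client('s3', region_name='us-east-1')
s3_client.get_bucket_versioning(Bucket='example-bucket')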

@@ -25,6 +25,7 @@ short_description: Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
description:
- Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
version_added: "2.0"
requirements: [ boto3 ]
author: "Rob White (@wimnat)"
options:
force:
@@ -105,48 +106,22 @@ EXAMPLES = '''
import json
import os
import traceback
import xml.etree.ElementTree as ET
import time
import ansible.module_utils.six.moves.urllib.parse as urlparse
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec
from ansible.module_utils.ec2 import sort_json_policy_dict, compare_policies
from ansible.module_utils.basic import to_text
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import compare_policies, ec2_argument_spec, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, AWSRetry
try:
import boto.ec2
from boto.s3.connection import OrdinaryCallingFormat, Location, S3Connection
from boto.s3.tagging import Tags, TagSet
from boto.exception import BotoServerError, S3CreateError, S3ResponseError, BotoClientError
HAS_BOTO = True
from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError
except ImportError:
HAS_BOTO = False
pass # handled by AnsibleAWSModule
def get_request_payment_status(bucket):
response = bucket.get_request_payment()
root = ET.fromstring(response)
for message in root.findall('.//{http://s3.amazonaws.com/doc/2006-03-01/}Payer'):
payer = message.text
return (payer != "BucketOwner")
def create_tags_container(tags):
tag_set = TagSet()
tags_obj = Tags()
for key, val in tags.items():
tag_set.add_tag(key, val)
tags_obj.add_tag_set(tag_set)
return tags_obj
def _create_or_update_bucket(connection, module, location):
def create_or_update_bucket(s3_client, module, location):
policy = module.params.get("policy")
name = module.params.get("name")
@@ -156,170 +131,325 @@ def _create_or_update_bucket(connection, module, location):
changed = False
try:
bucket = connection.get_bucket(name)
except S3ResponseError as e:
bucket_is_present = bucket_exists(s3_client, name)
except EndpointConnectionError as e:
module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to check bucket presence")
if not bucket_is_present:
try:
bucket = connection.create_bucket(name, location=location)
changed = True
except (S3CreateError, BotoClientError) as e:
module.fail_json(msg=e.message)
bucket_changed = create_bucket(s3_client, name, location)
s3_client.get_waiter('bucket_exists').wait(Bucket=name)
changed = changed or bucket_changed
except WaiterError as e:
module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed while creating bucket")
# Versioning
versioning_status = bucket.get_versioning_status()
if versioning is not None:
if versioning and versioning_status.get('Versioning') != "Enabled":
try:
bucket.configure_versioning(versioning)
changed = True
versioning_status = bucket.get_versioning_status()
except S3ResponseError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc())
elif not versioning and versioning_status.get('Versioning') == "Enabled":
versioning_status = get_bucket_versioning(s3_client, name)
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get bucket versioning")
if versioning is not None:
required_versioning = None
if versioning and versioning_status.get('Status') != "Enabled":
required_versioning = 'Enabled'
elif not versioning and versioning_status.get('Status') == "Enabled":
required_versioning = 'Suspended'
if required_versioning:
try:
bucket.configure_versioning(versioning)
put_bucket_versioning(s3_client, name, required_versioning)
changed = True
versioning_status = bucket.get_versioning_status()
except S3ResponseError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc())
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to update bucket versioning")
versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
# This output format is there to ensure compatibility with previous versions of the module
versioning_return_value = {
'Versioning': versioning_status.get('Status', 'Disabled'),
'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
}
# Requester pays
requester_pays_status = get_request_payment_status(bucket)
if requester_pays_status != requester_pays:
if requester_pays:
payer = 'Requester'
else:
payer = 'BucketOwner'
bucket.set_request_payment(payer=payer)
try:
requester_pays_status = get_bucket_request_payment(s3_client, name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to get bucket request payment")
payer = 'Requester' if requester_pays else 'BucketOwner'
if requester_pays_status != payer:
put_bucket_request_payment(s3_client, name, payer)
requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
if requester_pays_status is None:
# We have seen quite often that the put request is not taken into
# account, so we retry one more time
put_bucket_request_payment(s3_client, name, payer)
requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
changed = True
requester_pays_status = get_request_payment_status(bucket)
# Policy
try:
current_policy = json.loads(to_native(bucket.get_policy()))
except S3ResponseError as e:
if e.error_code == "NoSuchBucketPolicy":
current_policy = {}
else:
module.fail_json(msg=e.message)
current_policy = get_bucket_policy(s3_client, name)
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get bucket policy")
if policy is not None:
if isinstance(policy, string_types):
policy = json.loads(policy)
if not policy:
bucket.delete_policy()
# only show changed if there was already a policy
changed = bool(current_policy)
elif compare_policies(current_policy, policy):
if not policy and current_policy:
try:
delete_bucket_policy(s3_client, name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket policy")
current_policy = wait_policy_is_applied(module, s3_client, name, policy)
changed = True
elif compare_policies(current_policy, policy):
try:
bucket.set_policy(json.dumps(policy))
current_policy = json.loads(to_native(bucket.get_policy()))
except S3ResponseError as e:
module.fail_json(msg=e.message)
put_bucket_policy(s3_client, name, policy)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to update bucket policy")
current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
if current_policy is None:
# As with request payment, it happens quite often that the put request is not taken into
# account, so we retry one more time
put_bucket_policy(s3_client, name, policy)
current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
changed = True
# Tags
try:
current_tags = bucket.get_tags()
except S3ResponseError as e:
if e.error_code == "NoSuchTagSet":
current_tags = None
else:
module.fail_json(msg=e.message)
if current_tags is None:
current_tags_dict = {}
else:
current_tags_dict = dict((t.key, t.value) for t in current_tags[0])
current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get bucket tags")
if tags is not None:
if current_tags_dict != tags:
try:
if tags:
bucket.set_tags(create_tags_container(tags))
try:
put_bucket_tagging(s3_client, name, tags)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to update bucket tags")
else:
bucket.delete_tags()
try:
delete_bucket_tagging(s3_client, name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket tags")
wait_tags_are_applied(module, s3_client, name, tags)
current_tags_dict = tags
changed = True
except S3ResponseError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed, name=bucket.name, versioning=versioning_status,
requester_pays=requester_pays_status, policy=current_policy, tags=current_tags_dict)
module.exit_json(changed=changed, name=name, versioning=versioning_return_value,
requester_pays=requester_pays, policy=current_policy, tags=current_tags_dict)
def _destroy_bucket(connection, module):
def bucket_exists(s3_client, bucket_name):
# head_bucket appeared to be really inconsistent, so we use list_buckets instead,
# and loop over all the buckets, even if we know it's less performant :(
all_buckets = s3_client.list_buckets()['Buckets']
return any(bucket['Name'] == bucket_name for bucket in all_buckets)
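The comment above records the design choice: head_bucket proved unreliable, so the module scans the full list_buckets() result instead. For comparison, a head_bucket-based check (the alternative that was avoided, shown here only as a hypothetical helper; ClientError is already imported from botocore.exceptions above) would look roughly like:

def bucket_exists_via_head(s3_client, bucket_name):
    # Hypothetical alternative, not part of the module: head_bucket succeeds for an
    # accessible bucket and raises ClientError with a 404 code when the bucket is absent.
    try:
        s3_client.head_bucket(Bucket=bucket_name)
        return True
    except ClientError as e:
        if e.response['Error']['Code'] in ('404', 'NoSuchBucket'):
            return False
        raise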
force = module.params.get("force")
name = module.params.get("name")
changed = False
@AWSRetry.exponential_backoff(max_delay=120)
def create_bucket(s3_client, bucket_name, location):
try:
bucket = connection.get_bucket(name)
except S3ResponseError as e:
if e.error_code != "NoSuchBucket":
module.fail_json(msg=e.message)
configuration = {}
if location not in ('us-east-1', None):
configuration['LocationConstraint'] = location
if len(configuration) > 0:
s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration)
else:
s3_client.create_bucket(Bucket=bucket_name)
return True
except ClientError as e:
error_code = e.response['Error']['Code']
if error_code == 'BucketAlreadyOwnedByYou':
# We should never get here since we check for the bucket's presence before calling the create_or_update_bucket
# method. However, the AWS API sometimes fails to report bucket presence, so we catch this exception
return False
else:
# Bucket already absent
module.exit_json(changed=changed)
raise e
if force:
try:
# Empty the bucket
for key in bucket.list():
key.delete()
except BotoServerError as e:
module.fail_json(msg=e.message)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_tagging(s3_client, bucket_name, tags):
s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_policy(s3_client, bucket_name, policy):
s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def delete_bucket_policy(s3_client, bucket_name):
s3_client.delete_bucket_policy(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def get_bucket_policy(s3_client, bucket_name):
try:
bucket = connection.delete_bucket(name)
changed = True
except S3ResponseError as e:
module.fail_json(msg=e.message)
current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy'))
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchBucketPolicy':
current_policy = None
else:
raise e
return current_policy
module.exit_json(changed=changed)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_request_payment(s3_client, bucket_name, payer):
s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
def _create_or_update_bucket_ceph(connection, module, location):
# TODO: add update
name = module.params.get("name")
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def get_bucket_request_payment(s3_client, bucket_name):
return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def get_bucket_versioning(s3_client, bucket_name):
return s3_client.get_bucket_versioning(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_versioning(s3_client, bucket_name, required_versioning):
s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
changed = False
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def delete_bucket_tagging(s3_client, bucket_name):
s3_client.delete_bucket_tagging(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120)
def delete_bucket(s3_client, bucket_name):
try:
bucket = connection.get_bucket(name)
except S3ResponseError as e:
s3_client.delete_bucket(Bucket=bucket_name)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchBucket':
# This means the bucket should have been in a deleting state when we checked its existence
# We just ignore the error
pass
else:
raise e
def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
for dummy in range(0, 12):
try:
bucket = connection.create_bucket(name, location=location)
changed = True
except (S3CreateError, BotoClientError) as e:
module.fail_json(msg=e.message)
current_policy = get_bucket_policy(s3_client, bucket_name)
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get bucket policy")
if bucket:
module.exit_json(changed=changed)
if compare_policies(current_policy, expected_policy):
time.sleep(5)
else:
module.fail_json(msg='Unable to create bucket, no error from the API')
return current_policy
if should_fail:
module.fail_json(msg="Bucket policy failed to apply in the excepted time")
else:
return None
def _destroy_bucket_ceph(connection, module):
_destroy_bucket(connection, module)
def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
for dummy in range(0, 12):
try:
requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to get bucket request payment")
if requester_pays_status != expected_payer:
time.sleep(5)
else:
return requester_pays_status
if should_fail:
module.fail_json(msg="Bucket request payment failed to apply in the excepted time")
else:
return None
def create_or_update_bucket(connection, module, location, flavour='aws'):
if flavour == 'ceph':
_create_or_update_bucket_ceph(connection, module, location)
def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
for dummy in range(0, 12):
try:
versioning_status = get_bucket_versioning(s3_client, bucket_name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
if versioning_status.get('Status') != required_versioning:
time.sleep(5)
else:
_create_or_update_bucket(connection, module, location)
return versioning_status
module.fail_json(msg="Bucket versioning failed to apply in the excepted time")
def destroy_bucket(connection, module, flavour='aws'):
if flavour == 'ceph':
_destroy_bucket_ceph(connection, module)
def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
for dummy in range(0, 12):
try:
current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get bucket policy")
if current_tags_dict != expected_tags_dict:
time.sleep(5)
else:
_destroy_bucket(connection, module)
return
module.fail_json(msg="Bucket tags failed to apply in the excepted time")
def get_current_bucket_tags_dict(s3_client, bucket_name):
try:
current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchTagSet':
return {}
raise e
return boto3_tag_list_to_ansible_dict(current_tags)
def paginated_list(s3_client, **pagination_params):
pg = s3_client.get_paginator('list_objects_v2')
for page in pg.paginate(**pagination_params):
yield [data['Key'] for data in page.get('Contents', [])]
def destroy_bucket(s3_client, module):
force = module.params.get("force")
name = module.params.get("name")
try:
bucket_is_present = bucket_exists(s3_client, name)
except EndpointConnectionError as e:
module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to check bucket presence")
if not bucket_is_present:
module.exit_json(changed=False)
if force:
# if there are contents then we need to delete them before we can delete the bucket
try:
for keys in paginated_list(s3_client, Bucket=name):
formatted_keys = [{'Key': key} for key in keys]
if formatted_keys:
s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed while deleting bucket")
try:
delete_bucket(s3_client, name)
s3_client.get_waiter('bucket_not_exists').wait(Bucket=name)
except WaiterError as e:
module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket")
module.exit_json(changed=True)
def is_fakes3(s3_url):
@@ -341,6 +471,32 @@ def is_walrus(s3_url):
return False
def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
if s3_url and ceph: # TODO - test this
ceph = urlparse(s3_url)
params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
elif is_fakes3(s3_url):
fakes3 = urlparse(s3_url)
port = fakes3.port
if fakes3.scheme == 'fakes3s':
protocol = "https"
if port is None:
port = 443
else:
protocol = "http"
if port is None:
port = 80
params = dict(module=module, conn_type='client', resource='s3', region=location,
endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
elif is_walrus(s3_url):
walrus = urlparse(s3_url).hostname
params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=walrus, **aws_connect_kwargs)
else:
params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
return boto3_conn(**params)
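For reference, boto3_conn from ansible.module_utils.ec2 is a thin wrapper around a boto3 session; under the stated assumptions (parameter plumbing simplified, credentials resolved from aws_connect_kwargs), the call above amounts to roughly:

import boto3

# Sketch only: approximate expansion of boto3_conn(conn_type='client', resource='s3', ...).
session = boto3.session.Session()
s3_client = session.client('s3', region_name=location, endpoint_url=s3_url, use_ssl=True)
# 'location' and 's3_url' refer to the arguments of get_s3_client above; use_ssl shown fixed for brevity.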
def main():
argument_spec = ec2_argument_spec()
@@ -358,83 +514,45 @@ def main():
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
module = AnsibleAWSModule(argument_spec=argument_spec)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if region in ('us-east-1', '', None):
# S3ism for the US Standard region
location = Location.DEFAULT
# default to US Standard region
location = 'us-east-1'
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
s3_url = module.params.get('s3_url')
ceph = module.params.get('ceph')
# allow eucarc environment variables to be used if ansible vars aren't set
if not s3_url and 'S3_URL' in os.environ:
s3_url = os.environ['S3_URL']
ceph = module.params.get('ceph')
if ceph and not s3_url:
module.fail_json(msg='ceph flavour requires s3_url')
flavour = 'aws'
# bucket names with .'s in them need to use the calling_format option,
# otherwise the connection will fail. See https://github.com/boto/boto/issues/2836
# for more details.
aws_connect_params['calling_format'] = OrdinaryCallingFormat()
# Look at s3_url and tweak connection settings
# if connecting to Walrus or fakes3
try:
if s3_url and ceph:
ceph = urlparse.urlparse(s3_url)
connection = boto.connect_s3(
host=ceph.hostname,
port=ceph.port,
is_secure=ceph.scheme == 'https',
**aws_connect_params
)
flavour = 'ceph'
elif is_fakes3(s3_url):
fakes3 = urlparse.urlparse(s3_url)
connection = S3Connection(
is_secure=fakes3.scheme == 'fakes3s',
host=fakes3.hostname,
port=fakes3.port,
**aws_connect_params
)
elif is_walrus(s3_url):
del aws_connect_params['calling_format']
walrus = urlparse.urlparse(s3_url).hostname
connection = boto.connect_walrus(walrus, **aws_connect_params)
else:
connection = boto.s3.connect_to_region(location, is_secure=True, **aws_connect_params)
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if connection is None:
connection = boto.connect_s3(**aws_connect_params)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
except Exception as e:
module.fail_json(msg='Failed to connect to S3: %s' % str(e))
# if connecting to Ceph RGW, Walrus or fakes3
if s3_url:
for key in ['validate_certs', 'security_token', 'profile_name']:
aws_connect_kwargs.pop(key, None)
s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)
if connection is None: # this should never happen
if s3_client is None: # this should never happen
module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')
state = module.params.get("state")
if state == 'present':
create_or_update_bucket(connection, module, location, flavour=flavour)
create_or_update_bucket(s3_client, module, location)
elif state == 'absent':
destroy_bucket(connection, module, flavour=flavour)
destroy_bucket(s3_client, module)
if __name__ == '__main__':
main()

@@ -1 +1,2 @@
cloud/aws
posix/ci/cloud/group4/aws

@@ -86,6 +86,10 @@
- output.policy.Statement[0].Sid == 'AddPerm'
# ============================================================
- name: Pause to help with s3 bucket eventual consistency
pause:
seconds: 10
- name: Try to update the same complex s3_bucket
s3_bucket:
name: "{{ resource_prefix }}-testbucket-ansible-complex"
@@ -127,6 +131,10 @@
- output.policy.Statement[0].Sid == 'AddPerm'
# ============================================================
- name: Pause to help with s3 bucket eventual consistency
pause:
seconds: 10
- name: Update attributes for s3_bucket
s3_bucket:
name: "{{ resource_prefix }}-testbucket-ansible-complex"
@@ -146,7 +154,7 @@
- output.name == '{{ resource_prefix }}-testbucket-ansible-complex'
- not output.requester_pays
- output.versioning.MfaDelete == 'Disabled'
- output.versioning.Versioning == 'Suspended'
- output.versioning.Versioning in ['Suspended', 'Disabled']
- output.tags.example == 'tag1-udpated'
- output.tags.another == 'tag2'
- output.policy.Statement[0].Action == 's3:GetObject'
@@ -155,6 +163,65 @@
- output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ resource_prefix }}-testbucket-ansible-complex/*'
- output.policy.Statement[0].Sid == 'AddPerm'
# ============================================================
- name: Pause to help with s3 bucket eventual consistency
pause:
seconds: 10
- name: Remove a tag for s3_bucket
s3_bucket:
name: "{{ resource_prefix }}-testbucket-ansible-complex"
state: present
policy: "{{ lookup('template','policy.json') }}"
requester_pays: no
versioning: no
tags:
example: tag1-udpated
<<: *aws_connection_info
register: output
- assert:
that:
- output.changed
- output.tags.example == 'tag1-udpated'
- "'another' not in output.tags"
# ============================================================
- name: Pause to help with s3 bucket eventual consistency
pause:
seconds: 10
- name: Do not specify any tag to ensure previous tags are not removed
s3_bucket:
name: "{{ resource_prefix }}-testbucket-ansible-complex"
state: present
policy: "{{ lookup('template','policy.json') }}"
requester_pays: no
versioning: no
<<: *aws_connection_info
register: output
- assert:
that:
- not output.changed
- output.tags.example == 'tag1-udpated'
# ============================================================
- name: Remove all tags
s3_bucket:
name: "{{ resource_prefix }}-testbucket-ansible-complex"
state: present
policy: "{{ lookup('template','policy.json') }}"
requester_pays: no
versioning: no
tags: {}
<<: *aws_connection_info
register: output
- assert:
that:
- output.changed
- output.tags == {}
# ============================================================
- name: Delete s3_bucket
@@ -181,6 +248,8 @@
- output.changed
- output.name == '{{ resource_prefix }}.testbucket.ansible'
# ============================================================
- name: Delete s3_bucket
s3_bucket:
name: "{{ resource_prefix }}.testbucket.ansible"
@@ -192,6 +261,18 @@
that:
- output.changed
# ============================================================
- name: Try to delete a missing bucket (should not fail)
s3_bucket:
name: "{{ resource_prefix }}.testbucket.ansible.missing"
state: absent
<<: *aws_connection_info
register: output
- assert:
that:
- not output.changed
# ============================================================
always:
- name: Ensure all buckets are deleted
@@ -199,6 +280,7 @@
name: "{{item}}"
state: absent
<<: *aws_connection_info
ignore_errors: yes
with_items:
- "{{ resource_prefix }}-testbucket-ansible"
- "{{ resource_prefix }}-testbucket-ansible-complex"
