From f61164406ecd9f1f918123bd32d59fe361589965 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Thu, 7 Jun 2018 01:22:52 +1000 Subject: [PATCH] [aws] Support custom KMS keys in `aws_s3` module (#35761) * Allow the use of 'aws:kms' as an encryption method * Allow the use of a non standard KMS key * Deduce whether AWS Signature Version 4 is required rather than specifying with a parameter --- lib/ansible/modules/cloud/amazon/aws_s3.py | 69 +- .../integration/targets/aws_s3/tasks/main.yml | 706 +++++++++++------- 2 files changed, 477 insertions(+), 298 deletions(-) diff --git a/lib/ansible/modules/cloud/amazon/aws_s3.py b/lib/ansible/modules/cloud/amazon/aws_s3.py index 949c3c9e4e4..26a045ef4b2 100644 --- a/lib/ansible/modules/cloud/amazon/aws_s3.py +++ b/lib/ansible/modules/cloud/amazon/aws_s3.py @@ -52,6 +52,14 @@ options: - When set for PUT mode, asks for server-side encryption. default: True version_added: "2.0" + encryption_mode: + description: + - What encryption mode to use if C(encrypt) is set + default: AES256 + choices: + - AES256 + - aws:kms + version_added: "2.7" expiration: description: - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation. @@ -140,6 +148,10 @@ options: GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying ignore_nonexistent_bucket: True." version_added: "2.3" + encryption_kms_key_id: + description: + - KMS key id to use when encrypting objects using C(aws:kms) encryption. Ignored if encryption is not C(aws:kms) + version_added: "2.7" requirements: [ "boto3", "botocore" ] author: @@ -290,6 +302,10 @@ except ImportError: pass # will be detected by imported HAS_BOTO3 +class Sigv4Required(Exception): + pass + + def key_check(module, s3, bucket, obj, version=None, validate=True): exists = True try: @@ -443,7 +459,9 @@ def create_dirkey(module, s3, bucket, obj, encrypt): try: params = {'Bucket': bucket, 'Key': obj, 'Body': b''} if encrypt: - params['ServerSideEncryption'] = 'AES256' + params['ServerSideEncryption'] = module.params['encryption_mode'] + if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': + params['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] s3.put_object(**params) for acl in module.params.get('permission'): @@ -481,7 +499,9 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, heade try: extra = {} if encrypt: - extra['ServerSideEncryption'] = 'AES256' + extra['ServerSideEncryption'] = module.params['encryption_mode'] + if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': + extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] if metadata: extra['Metadata'] = {} @@ -522,7 +542,9 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None): else: key = s3.get_object(Bucket=bucket, Key=obj) except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] != "404": + if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e): + raise Sigv4Required() + elif e.response['Error']['Code'] != "404": module.fail_json(msg="Could not find the key %s." 
% obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) for x in range(0, retries + 1): @@ -551,8 +573,11 @@ def download_s3str(module, s3, bucket, obj, version=None, validate=True): contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read()) module.exit_json(msg="GET operation complete", contents=contents, changed=True) except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed while getting contents of object %s as a string." % obj, - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e): + raise Sigv4Required() + else: + module.fail_json(msg="Failed while getting contents of object %s as a string." % obj, + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) def get_download_url(module, s3, bucket, obj, expiry, changed=True): @@ -584,7 +609,7 @@ def is_walrus(s3_url): return False -def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url): +def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False): if s3_url and rgw: # TODO - test this rgw = urlparse(s3_url) params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs) @@ -607,6 +632,10 @@ def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url): params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=walrus, **aws_connect_kwargs) else: params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs) + if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms': + params['config'] = botocore.client.Config(signature_version='s3v4') + elif module.params['mode'] in ('get', 'getstr') and sig_4: + params['config'] = botocore.client.Config(signature_version='s3v4') return boto3_conn(**params) @@ -617,6 +646,7 @@ def main(): bucket=dict(required=True), dest=dict(default=None, type='path'), encrypt=dict(default=True, type='bool'), + encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'), expiry=dict(default=600, type='int', aliases=['expiration']), headers=dict(type='dict'), marker=dict(default=""), @@ -632,7 +662,8 @@ def main(): s3_url=dict(aliases=['S3_URL']), rgw=dict(default='no', type='bool'), src=dict(), - ignore_nonexistent_bucket=dict(default=False, type='bool') + ignore_nonexistent_bucket=dict(default=False, type='bool'), + encryption_kms_key_id=dict() ), ) module = AnsibleModule( @@ -746,18 +777,30 @@ def main(): if keysum_compare(module, dest, s3, bucket, obj, version=version): sum_matches = True if overwrite == 'always': - download_s3file(module, s3, bucket, obj, dest, retries, version=version) + try: + download_s3file(module, s3, bucket, obj, dest, retries, version=version) + except Sigv4Required: + s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True) + download_s3file(module, s3, bucket, obj, dest, retries, version=version) else: module.exit_json(msg="Local and remote object are identical, ignoring. 
Use overwrite=always parameter to force.", changed=False) else: sum_matches = False if overwrite in ('always', 'different'): - download_s3file(module, s3, bucket, obj, dest, retries, version=version) + try: + download_s3file(module, s3, bucket, obj, dest, retries, version=version) + except Sigv4Required: + s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True) + download_s3file(module, s3, bucket, obj, dest, retries, version=version) else: module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.") else: - download_s3file(module, s3, bucket, obj, dest, retries, version=version) + try: + download_s3file(module, s3, bucket, obj, dest, retries, version=version) + except Sigv4Required: + s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True) + download_s3file(module, s3, bucket, obj, dest, retries, version=version) # if our mode is a PUT operation (upload), go through the procedure as appropriate ... if mode == 'put': @@ -887,7 +930,11 @@ def main(): if bucket and obj: keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) if keyrtn: - download_s3str(module, s3, bucket, obj, version=version) + try: + download_s3str(module, s3, bucket, obj, version=version) + except Sigv4Required: + s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True) + download_s3str(module, s3, bucket, obj, version=version) elif version is not None: module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version)) else: diff --git a/test/integration/targets/aws_s3/tasks/main.yml b/test/integration/targets/aws_s3/tasks/main.yml index 0b5562a3e52..501bbf73fca 100644 --- a/test/integration/targets/aws_s3/tasks/main.yml +++ b/test/integration/targets/aws_s3/tasks/main.yml @@ -1,6 +1,6 @@ --- # tasks file for test_s3 -# ============================================================ + - name: set up aws connection info set_fact: aws_connection_info: &aws_connection_info @@ -9,333 +9,465 @@ security_token: "{{ security_token }}" region: "{{ aws_region }}" no_log: yes -# ============================================================ -- name: test create bucket - aws_s3: - bucket: "{{ bucket_name }}" - mode: create - <<: *aws_connection_info - register: result -- name: assert changed is True - assert: - that: - - result.changed == True -# ============================================================ -- name: trying to create a bucket name that already exists - aws_s3: - bucket: "{{ bucket_name }}" - mode: create - <<: *aws_connection_info - register: result -- name: assert changed is False since the bucket already exists - assert: - that: - - result.changed == False -# ============================================================ -- name: create temporary file object to put in a bucket - tempfile: - register: tmp1 -- name: make random contents - set_fact: - content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}" - -- name: give temporary file data - copy: - content: "{{ content }}" - dest: "{{ tmp1.path }}" -- name: get the stat of the file - stat: - path: "{{ tmp1.path }}" - get_checksum: yes - register: file1stat -# ============================================================ -- name: test putting an object in the bucket - aws_s3: - bucket: "{{ bucket_name }}" - mode: put - src: "{{ tmp1.path }}" - object: delete.txt - <<: *aws_connection_info - retries: 3 - delay: 3 - register: result -- name: assert object exists - 
assert: - that: - - result.changed == True - - result.msg == "PUT operation complete" -# ============================================================ -- name: check that roles file lookups work as expected - aws_s3: - bucket: "{{ bucket_name }}" - mode: put - src: hello.txt - object: hello.txt - <<: *aws_connection_info - retries: 3 - delay: 3 - register: result -- name: assert object exists - assert: - that: - - result.changed == True - - result.msg == "PUT operation complete" -- name: remove hello.txt (deletion tests are later) - aws_s3: - bucket: "{{ bucket_name }}" - mode: delobj - object: hello.txt - <<: *aws_connection_info - retries: 3 - delay: 3 - register: result -# ============================================================ -- name: create a second temp file to download the object from the bucket - tempfile: - register: tmp2 -- name: test get object - aws_s3: - bucket: "{{ bucket_name }}" - mode: get - dest: "{{ tmp2.path }}" - object: delete.txt - <<: *aws_connection_info - retries: 3 - delay: 3 - register: result - until: "result.msg == 'GET operation complete'" -- name: get the stat of the file so we can compare the checksums - stat: - path: "{{ tmp2.path }}" - get_checksum: yes - register: file2stat -- name: assert checksums are the same - assert: - that: - - file1stat.stat.checksum == file2stat.stat.checksum -# ============================================================ -- name: test geturl of the object - aws_s3: - bucket: "{{ bucket_name }}" - mode: geturl - object: delete.txt - <<: *aws_connection_info - retries: 3 - delay: 3 - register: result - until: result.changed -- name: assert we have the object's url - assert: - that: - - "'Download url:' in result.msg" - - result.changed == True -# ============================================================ -- name: test getstr of the object - aws_s3: - bucket: "{{ bucket_name }}" - mode: getstr - object: delete.txt - <<: *aws_connection_info - retries: 3 - delay: 3 - register: result -- name: assert that we have the object's contents - assert: - that: - - result.msg == "GET operation complete" - - result.contents == content -# ============================================================ -- name: test list to get all objects in the bucket - aws_s3: - bucket: "{{ bucket_name }}" - mode: list - <<: *aws_connection_info - retries: 3 - delay: 3 - register: result -- name: assert that the keys are correct - assert: - that: - - "'delete.txt' in result.s3_keys" - - result.msg == "LIST operation complete" -# ============================================================ -- name: test delobj to just delete an object in the bucket - aws_s3: - bucket: "{{ bucket_name }}" - mode: delobj - object: delete.txt - <<: *aws_connection_info - retries: 3 - delay: 3 - register: result -- name: assert that delete.txt is no longer an object in the bucket deleteme - assert: - that: - - "'Object deleted from bucket' in result.msg" - - result.changed == True -- name: assert that delete.txt is no longer an object in the bucket deleteme - assert: - that: - - "'Object deleted from bucket' in result.msg" - - result.changed == True -# ============================================================ -- name: test creation of empty path - aws_s3: - bucket: "{{ bucket_name }}" - mode: create - object: foo/bar/baz/ - <<: *aws_connection_info - retries: 3 - delay: 3 - register: result -- name: assert that empty path is created - assert: - that: - - "'Virtual directory foo/bar/baz/ created' in result.msg" - - result.changed == True -- name: test deletion of empty path 
- aws_s3: - bucket: "{{ bucket_name }}" - mode: delobj - object: foo/bar/baz/ - <<: *aws_connection_info - retries: 3 - delay: 3 -# ============================================================ -- name: test delete bucket - aws_s3: - bucket: "{{ bucket_name }}" - mode: delete - <<: *aws_connection_info - register: result - retries: 3 - delay: 3 - until: result.changed -- name: assert that changed is True - assert: - that: - - result.changed == True -# ============================================================ -- name: delete temporary file 1 - file: - state: absent - path: "{{ tmp1.path }}" -- name: delete temporary file 2 - file: - state: absent - path: "{{ tmp2.path }}" -# ============================================================ -- name: test create a bucket with a dot in the name - aws_s3: - bucket: "{{ bucket_name + '.bucket' }}" - mode: create - <<: *aws_connection_info - register: result -- name: assert that changed is True - assert: - that: - - result.changed == True -# ============================================================ -- name: test delete a bucket with a dot in the name - aws_s3: - bucket: "{{ bucket_name + '.bucket' }}" - mode: delete - <<: *aws_connection_info - register: result -- name: assert that changed is True - assert: - that: - - result.changed == True -# ============================================================ -- name: test delete a nonexistent bucket - aws_s3: - bucket: "{{ bucket_name + '.bucket' }}" - mode: delete - <<: *aws_connection_info - register: result -- name: assert that changed is False - assert: - that: - - result.changed == False -# ============================================================ -- name: create a tempfile for the path - tempfile: - register: tmp1 - -- name: make tempfile 4 GB for OSX - command: - _raw_params: "dd if=/dev/zero of={{ tmp1.path }} bs=1m count=4096" - when: ansible_distribution == 'MacOSX' - -- name: make tempfile 4 GB for linux - command: - _raw_params: "dd if=/dev/zero of={{ tmp1.path }} bs=1M count=4096" - when: ansible_distribution == 'Linux' - -- name: test multipart download - platform specific - block: - - name: make a bucket to upload the file + +- block: + - name: test create bucket aws_s3: bucket: "{{ bucket_name }}" mode: create <<: *aws_connection_info + register: result + - name: assert changed is True + assert: + that: + - result.changed == True - - name: upload the file to the bucket + - name: trying to create a bucket name that already exists + aws_s3: + bucket: "{{ bucket_name }}" + mode: create + <<: *aws_connection_info + register: result + - name: assert changed is False since the bucket already exists + assert: + that: + - result.changed == False + + - name: create temporary file object to put in a bucket + tempfile: + register: tmp1 + - name: make random contents + set_fact: + content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}" + + - name: give temporary file data + copy: + content: "{{ content }}" + dest: "{{ tmp1.path }}" + - name: get the stat of the file + stat: + path: "{{ tmp1.path }}" + get_checksum: yes + register: file1stat + + - name: test putting an object in the bucket aws_s3: bucket: "{{ bucket_name }}" mode: put src: "{{ tmp1.path }}" - object: multipart.txt + object: delete.txt <<: *aws_connection_info + retries: 3 + delay: 3 + register: result + - name: assert object exists + assert: + that: + - result.changed == True + - result.msg == "PUT operation complete" - - name: download file once + - name: check that roles file lookups 
work as expected + aws_s3: + bucket: "{{ bucket_name }}" + mode: put + src: hello.txt + object: hello.txt + <<: *aws_connection_info + retries: 3 + delay: 3 + register: result + - name: assert object exists + assert: + that: + - result.changed == True + - result.msg == "PUT operation complete" + - name: remove hello.txt (deletion tests are later) + aws_s3: + bucket: "{{ bucket_name }}" + mode: delobj + object: hello.txt + <<: *aws_connection_info + retries: 3 + delay: 3 + register: result + + - name: create a second temp file to download the object from the bucket + tempfile: + register: tmp2 + + - name: test get object aws_s3: bucket: "{{ bucket_name }}" mode: get - dest: /tmp/multipart_download.txt - object: multipart.txt - overwrite: different + dest: "{{ tmp2.path }}" + object: delete.txt <<: *aws_connection_info retries: 3 delay: 3 + register: result until: "result.msg == 'GET operation complete'" + - name: get the stat of the file so we can compare the checksums + stat: + path: "{{ tmp2.path }}" + get_checksum: yes + register: file2stat + - name: assert checksums are the same + assert: + that: + - file1stat.stat.checksum == file2stat.stat.checksum + + - name: test geturl of the object + aws_s3: + bucket: "{{ bucket_name }}" + mode: geturl + object: delete.txt + <<: *aws_connection_info + retries: 3 + delay: 3 + register: result + until: result.changed + - name: assert we have the object's url + assert: + that: + - "'Download url:' in result.msg" + - result.changed == True + + - name: test getstr of the object + aws_s3: + bucket: "{{ bucket_name }}" + mode: getstr + object: delete.txt + <<: *aws_connection_info + retries: 3 + delay: 3 register: result + - name: assert that we have the object's contents + assert: + that: + - result.msg == "GET operation complete" + - result.contents == content - - name: assert the file was downloaded once + - name: test list to get all objects in the bucket + aws_s3: + bucket: "{{ bucket_name }}" + mode: list + <<: *aws_connection_info + retries: 3 + delay: 3 + register: result + - name: assert that the keys are correct assert: that: - - result.changed + - "'delete.txt' in result.s3_keys" + - result.msg == "LIST operation complete" - - name: download file again + - name: test delobj to just delete an object in the bucket + aws_s3: + bucket: "{{ bucket_name }}" + mode: delobj + object: delete.txt + <<: *aws_connection_info + retries: 3 + delay: 3 + register: result + - name: assert that delete.txt is no longer an object in the bucket deleteme + assert: + that: + - "'Object deleted from bucket' in result.msg" + - result.changed == True + - name: assert that delete.txt is no longer an object in the bucket deleteme + assert: + that: + - "'Object deleted from bucket' in result.msg" + - result.changed == True + - name: clean up temp file + file: + path: "{{ tmp2.path }}" + state: absent + + - name: test putting an encrypted object in the bucket + aws_s3: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ tmp1.path }}" + encrypt: yes + object: delete_encrypt.txt + <<: *aws_connection_info + retries: 3 + delay: 3 + register: result + - name: assert object exists + assert: + that: + - result.changed == True + - result.msg == "PUT operation complete" + + - name: create a second temp file to download the object from the bucket + tempfile: + register: tmp2 + - name: test get encrypted object aws_s3: bucket: "{{ bucket_name }}" mode: get - dest: /tmp/multipart_download.txt - object: multipart.txt - overwrite: different + dest: "{{ tmp2.path }}" + object: 
delete_encrypt.txt <<: *aws_connection_info + retries: 3 + delay: 3 register: result + until: "result.msg == 'GET operation complete'" + - name: get the stat of the file so we can compare the checksums + stat: + path: "{{ tmp2.path }}" + get_checksum: yes + register: file2stat + - name: assert checksums are the same + assert: + that: + - file1stat.stat.checksum == file2stat.stat.checksum + - name: delete encrypted file + aws_s3: + bucket: "{{ bucket_name }}" + mode: delobj + object: delete_encrypt.txt + <<: *aws_connection_info + retries: 3 + delay: 3 + - name: clean up temp file + file: + path: "{{ tmp2.path }}" + state: absent - - name: assert the file was not redownloaded + - name: test putting an aws:kms encrypted object in the bucket + aws_s3: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ tmp1.path }}" + encrypt: yes + encryption_mode: aws:kms + object: delete_encrypt_kms.txt + <<: *aws_connection_info + retries: 3 + delay: 3 + register: result + - name: assert object exists assert: that: - - not result.changed + - result.changed == True + - result.msg == "PUT operation complete" - - name: delete file used for upload + - name: create a second temp file to download the object from the bucket + tempfile: + register: tmp2 + - name: test get KMS encrypted object + aws_s3: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ tmp2.path }}" + object: delete_encrypt_kms.txt + <<: *aws_connection_info + retries: 3 + delay: 3 + register: result + until: "result.msg == 'GET operation complete'" + - name: get the stat of the file so we can compare the checksums + stat: + path: "{{ tmp2.path }}" + get_checksum: yes + register: file2stat + - name: assert checksums are the same + assert: + that: + - file1stat.stat.checksum == file2stat.stat.checksum + # FIXME - could use a test that checks uploaded file is *actually* aws:kms encrypted + - name: test get KMS encrypted object using v4 signature + aws_s3: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ tmp2.path }}" + object: delete_encrypt_kms.txt + <<: *aws_connection_info + retries: 3 + delay: 3 + until: "result.msg == 'GET operation complete'" + - name: delete KMS encrypted file + aws_s3: + bucket: "{{ bucket_name }}" + mode: delobj + object: delete_encrypt_kms.txt + <<: *aws_connection_info + retries: 3 + delay: 3 + - name: clean up temp file + file: + path: "{{ tmp2.path }}" + state: absent + + # FIXME: could use a test that checks non standard KMS key + # but that would require ability to create and remove such keys. + # PRs exist for that, but propose deferring until after merge. 
+ + - name: test creation of empty path + aws_s3: + bucket: "{{ bucket_name }}" + mode: create + object: foo/bar/baz/ + <<: *aws_connection_info + retries: 3 + delay: 3 + register: result + - name: assert that empty path is created + assert: + that: + - "'Virtual directory foo/bar/baz/ created' in result.msg" + - result.changed == True + - name: test deletion of empty path + aws_s3: + bucket: "{{ bucket_name }}" + mode: delobj + object: foo/bar/baz/ + <<: *aws_connection_info + retries: 3 + delay: 3 + + - name: test delete bucket + aws_s3: + bucket: "{{ bucket_name }}" + mode: delete + <<: *aws_connection_info + register: result + retries: 3 + delay: 3 + until: result.changed + - name: assert that changed is True + assert: + that: + - result.changed == True + + - name: test create a bucket with a dot in the name + aws_s3: + bucket: "{{ bucket_name + '.bucket' }}" + mode: create + <<: *aws_connection_info + register: result + - name: assert that changed is True + assert: + that: + - result.changed == True + + - name: test delete a bucket with a dot in the name + aws_s3: + bucket: "{{ bucket_name + '.bucket' }}" + mode: delete + <<: *aws_connection_info + register: result + - name: assert that changed is True + assert: + that: + - result.changed == True + + - name: test delete a nonexistent bucket + aws_s3: + bucket: "{{ bucket_name + '.bucket' }}" + mode: delete + <<: *aws_connection_info + register: result + - name: assert that changed is False + assert: + that: + - result.changed == False + + - name: make tempfile 4 GB for OSX + command: + _raw_params: "dd if=/dev/zero of={{ tmp1.path }} bs=1m count=4096" + when: ansible_distribution == 'MacOSX' + + - name: make tempfile 4 GB for linux + command: + _raw_params: "dd if=/dev/zero of={{ tmp1.path }} bs=1M count=4096" + when: ansible_system == 'Linux' + + - name: test multipart download - platform specific + block: + - name: make a bucket to upload the file + aws_s3: + bucket: "{{ bucket_name }}" + mode: create + <<: *aws_connection_info + + - name: upload the file to the bucket + aws_s3: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ tmp1.path }}" + object: multipart.txt + <<: *aws_connection_info + + - name: download file once + aws_s3: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ tmp2.path }}" + object: multipart.txt + overwrite: different + <<: *aws_connection_info + retries: 3 + delay: 3 + until: "result.msg == 'GET operation complete'" + register: result + + - name: assert the file was downloaded once + assert: + that: + - result.changed + + - name: download file again + aws_s3: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ tmp2.path }}" + object: multipart.txt + overwrite: different + <<: *aws_connection_info + register: result + + - name: assert the file was not redownloaded + assert: + that: + - not result.changed + when: ansible_system == 'Linux' or ansible_distribution == 'MacOSX' + + always: + ###### TEARDOWN STARTS HERE ###### + + - name: remove uploaded files + aws_s3: + bucket: "{{ bucket_name }}" + mode: delobj + object: "{{ item }}" + <<: *aws_connection_info + with_items: + - hello.txt + - delete.txt + - delete_encrypt.txt + - delete_encrypt_kms.txt + ignore_errors: yes + + - name: delete temporary file 1 file: state: absent path: "{{ tmp1.path }}" + ignore_errors: yes - - name: delete downloaded file + - name: delete temporary file 2 file: state: absent - path: /tmp/multipart_download.txt + path: "{{ tmp2.path }}" + ignore_errors: yes - name: delete the bucket aws_s3: bucket: "{{ bucket_name }}" mode: 
delete <<: *aws_connection_info - - when: ansible_distribution in ['MacOSX', 'Linux'] -# ============================================================ + ignore_errors: yes
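
Usage note (not part of the patch): a minimal sketch of the new upload options added here. The bucket name, object key, source path and KMS key alias below are placeholders chosen for illustration. When encryption_mode is aws:kms, get_s3_connection() builds the boto3 client with signature_version 's3v4' automatically, so the upload needs no extra signature parameter.

    - name: upload an object encrypted with a non-default KMS key
      aws_s3:
        bucket: my-example-bucket                        # placeholder bucket
        object: secret.txt                               # placeholder key
        src: /tmp/secret.txt                             # placeholder local file
        mode: put
        encrypt: yes
        encryption_mode: aws:kms
        encryption_kms_key_id: alias/my-non-default-key  # placeholder; a key id, ARN or alias should work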
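
On the read side no new parameter is required; the sketch below reuses the same placeholder names. If the first GET is rejected with an InvalidArgument error saying the region requires AWS Signature Version 4 (as happens for KMS-encrypted objects), download_s3file()/download_s3str() raise the new Sigv4Required exception and main() retries with a sig_4=True connection, so the task itself is written exactly as before.

    - name: download the KMS-encrypted object (SigV4 is deduced, not configured)
      aws_s3:
        bucket: my-example-bucket        # placeholder bucket
        object: secret.txt               # placeholder key
        dest: /tmp/secret-copy.txt       # placeholder destination
        mode: get
        overwrite: different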