aws_s3: Improve ETag handling (#46332)

* cosmetic: Remove useless call to ec2_argument_spec()

* aws_s3: Improve ETag handling

* Extract ETag calculation into a utility function for reuse by
  aws_s3_sync.
* Reduce code duplication in put/get by restructuring the logic.
* Only calculate the ETag when overwrite == different.
* Fail gracefully when overwrite == different and MD5 isn't available
  (e.g. due to FIPS-140-2).

* aws_s3: clean up integration tests

Clean up the tests and add coverage for the overwrite settings in both
directions (put and get).
Branch: pull/47825/head
Authored by flowerysong 5 years ago, committed by Jill R
parent 0e7e3c0ae8 · commit c513c1e2d3
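Reviewer note: a quick refresher on the format this commit handles. S3's ETag for a multipart upload is not the MD5 of the whole object; it is the MD5 of the concatenated per-part MD5 digests, quoted and suffixed with the part count. A minimal standalone sketch, assuming a fixed part size (the 8 MB default here is illustrative only; real uploads can use any part size, which is why the new calculate_etag() helper below asks S3 for each part's ContentLength instead):

    import hashlib

    def multipart_etag(filename, part_size=8 * 1024 * 1024):
        # Assumed fixed part size, for illustration only.
        # Result: MD5 of the concatenated per-part MD5 digests,
        # quoted, with a '-<part count>' suffix.
        digests = []
        with open(filename, 'rb') as f:
            chunk = f.read(part_size)
            while chunk:
                digests.append(hashlib.md5(chunk).digest())
                chunk = f.read(part_size)
        digest_squared = hashlib.md5(b''.join(digests))
        return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))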

@@ -0,0 +1,2 @@
+bugfixes:
+- aws_s3 - Improve usability when the execution host lacks MD5 support (e.g. due to FIPS-140-2).

@@ -0,0 +1,47 @@
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # Handled by the calling module
+
+HAS_MD5 = True
+try:
+    from hashlib import md5
+except ImportError:
+    try:
+        from md5 import md5
+    except ImportError:
+        HAS_MD5 = False
+
+
+def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
+    if not HAS_MD5:
+        return None
+
+    if '-' in etag:
+        # Multi-part ETag; a hash of the hashes of each part.
+        parts = int(etag[1:-1].split('-')[1])
+        digests = []
+
+        s3_kwargs = dict(
+            Bucket=bucket,
+            Key=obj,
+        )
+        if version:
+            s3_kwargs['VersionId'] = version
+
+        with open(filename, 'rb') as f:
+            for part_num in range(1, parts + 1):
+                s3_kwargs['PartNumber'] = part_num
+                try:
+                    head = s3.head_object(**s3_kwargs)
+                except (BotoCoreError, ClientError) as e:
+                    module.fail_json_aws(e, msg="Failed to get head object")
+                digests.append(md5(f.read(int(head['ContentLength']))))
+
+        digest_squared = md5(b''.join(m.digest() for m in digests))
+        return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
+    else:  # Compute the MD5 sum normally
+        return '"{0}"'.format(module.md5(filename))

@@ -297,14 +297,14 @@ s3_keys:
   - prefix1/key2
 '''
 
-import hashlib
 import mimetypes
 import os
 from ansible.module_utils.six.moves.urllib.parse import urlparse
 from ssl import SSLError
 from ansible.module_utils.basic import to_text, to_native
 from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn
+from ansible.module_utils.aws.s3 import calculate_etag, HAS_MD5
+from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
 
 try:
     import botocore
@@ -340,42 +340,21 @@ def key_check(module, s3, bucket, obj, version=None, validate=True):
     return exists
 
 
-def keysum_compare(module, local_file, s3, bucket, obj, version=None):
-    s3_keysum = keysum(s3, bucket, obj, version=version)
-    if '-' in s3_keysum:  # Check for multipart, ETag is not a proper MD5 sum
-        parts = int(s3_keysum.split('-')[1])
-        md5s = []
-
-        with open(local_file, 'rb') as f:
-            for part_num in range(1, parts + 1):
-                # Get the part size for every part of the multipart uploaded object
-                if version:
-                    key_head = s3.head_object(Bucket=bucket, Key=obj, VersionId=version, PartNumber=part_num)
-                else:
-                    key_head = s3.head_object(Bucket=bucket, Key=obj, PartNumber=part_num)
-                part_size = int(key_head['ContentLength'])
-                data = f.read(part_size)
-                hash = hashlib.md5(data)
-                md5s.append(hash)
-
-        digests = b''.join(m.digest() for m in md5s)
-        digests_md5 = hashlib.md5(digests)
-        local_keysum = '{0}-{1}'.format(digests_md5.hexdigest(), len(md5s))
-    else:  # Compute the MD5 sum normally
-        local_keysum = module.md5(local_file)
-
-    return s3_keysum == local_keysum
+def etag_compare(module, local_file, s3, bucket, obj, version=None):
+    s3_etag = get_etag(s3, bucket, obj, version=version)
+    local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version)
+
+    return s3_etag == local_etag
 
 
-def keysum(s3, bucket, obj, version=None):
+def get_etag(s3, bucket, obj, version=None):
     if version:
         key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
     else:
         key_check = s3.head_object(Bucket=bucket, Key=obj)
     if not key_check:
         return None
-    md5_remote = key_check['ETag'][1:-1]
-    return md5_remote
+    return key_check['ETag']
 
 
 def bucket_check(module, s3, bucket, validate=True):
@@ -670,32 +649,29 @@ def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=F
 def main():
-    argument_spec = ec2_argument_spec()
-    argument_spec.update(
-        dict(
-            bucket=dict(required=True),
-            dest=dict(default=None, type='path'),
-            encrypt=dict(default=True, type='bool'),
-            encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
-            expiry=dict(default=600, type='int', aliases=['expiration']),
-            headers=dict(type='dict'),
-            marker=dict(default=""),
-            max_keys=dict(default=1000, type='int'),
-            metadata=dict(type='dict'),
-            mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
-            object=dict(),
-            permission=dict(type='list', default=['private']),
-            version=dict(default=None),
-            overwrite=dict(aliases=['force'], default='always'),
-            prefix=dict(default=""),
-            retries=dict(aliases=['retry'], type='int', default=0),
-            s3_url=dict(aliases=['S3_URL']),
-            dualstack=dict(default='no', type='bool'),
-            rgw=dict(default='no', type='bool'),
-            src=dict(),
-            ignore_nonexistent_bucket=dict(default=False, type='bool'),
-            encryption_kms_key_id=dict()
-        ),
+    argument_spec = dict(
+        bucket=dict(required=True),
+        dest=dict(default=None, type='path'),
+        encrypt=dict(default=True, type='bool'),
+        encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
+        expiry=dict(default=600, type='int', aliases=['expiration']),
+        headers=dict(type='dict'),
+        marker=dict(default=""),
+        max_keys=dict(default=1000, type='int'),
+        metadata=dict(type='dict'),
+        mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
+        object=dict(),
+        permission=dict(type='list', default=['private']),
+        version=dict(default=None),
+        overwrite=dict(aliases=['force'], default='always'),
+        prefix=dict(default=""),
+        retries=dict(aliases=['retry'], type='int', default=0),
+        s3_url=dict(aliases=['S3_URL']),
+        dualstack=dict(default='no', type='bool'),
+        rgw=dict(default='no', type='bool'),
+        src=dict(),
+        ignore_nonexistent_bucket=dict(default=False, type='bool'),
+        encryption_kms_key_id=dict()
     )
 
     module = AnsibleAWSModule(
         argument_spec=argument_spec,
@@ -735,6 +711,9 @@ def main():
     else:
         overwrite = 'never'
 
+    if overwrite == 'different' and not HAS_MD5:
+        module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')
+
     region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
 
     if region in ('us-east-1', '', None):
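Background on the HAS_MD5 guard this hunk relies on: the import-based detection in module_utils/aws/s3.py can be optimistic on hardened hosts, since some FIPS-140-2 enabled Python builds import hashlib.md5 fine and only raise ValueError when an MD5 object is constructed. A more defensive probe would look like the sketch below; this is an assumption about such builds, not what the patch does:

    # Sketch only: probe MD5 availability by constructing a digest,
    # since some FIPS-enabled builds fail at call time, not import time.
    try:
        from hashlib import md5
        md5(b'')
        HAS_MD5 = True
    except (ImportError, ValueError):
        HAS_MD5 = False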
@@ -792,9 +771,7 @@ def main():
     if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
         module.fail_json(msg="Source bucket cannot be found.")
 
-    # If our mode is a GET operation (download), go through the procedure as appropriate ...
     if mode == 'get':
-        # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
         keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
         if keyrtn is False:
             if version:
@@ -802,86 +779,42 @@ def main():
         else:
             module.fail_json(msg="Key %s does not exist." % obj)
 
-        # If the destination path doesn't exist or overwrite is True, no need to do the md5sum ETag check, so just download.
-        # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
-        if path_check(dest):
-            # Determine if the remote and local object are identical
-            if keysum_compare(module, dest, s3, bucket, obj, version=version):
-                sum_matches = True
-                if overwrite == 'always':
-                    try:
-                        download_s3file(module, s3, bucket, obj, dest, retries, version=version)
-                    except Sigv4Required:
-                        s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
-                        download_s3file(module, s3, bucket, obj, dest, retries, version=version)
-                else:
-                    module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
-            else:
-                sum_matches = False
-                if overwrite in ('always', 'different'):
-                    try:
-                        download_s3file(module, s3, bucket, obj, dest, retries, version=version)
-                    except Sigv4Required:
-                        s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
-                        download_s3file(module, s3, bucket, obj, dest, retries, version=version)
-                else:
-                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
-        else:
-            try:
-                download_s3file(module, s3, bucket, obj, dest, retries, version=version)
-            except Sigv4Required:
-                s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
-                download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+        if path_check(dest) and overwrite != 'always':
+            if overwrite == 'never':
+                module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
+            if etag_compare(module, dest, s3, bucket, obj, version=version):
+                module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
+
+        try:
+            download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+        except Sigv4Required:
+            s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
+            download_s3file(module, s3, bucket, obj, dest, retries, version=version)
 
-    # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
     if mode == 'put':
 
         # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
         # these were separated into the variables bucket_acl and object_acl above
 
-        # Lets check the src path.
         if not path_check(src):
             module.fail_json(msg="Local object for PUT does not exist")
 
-        # Lets check to see if bucket exists to get ground truth.
         if bucketrtn:
             keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
-
-        # Lets check key state. Does it exist and if it does, compute the ETag md5sum.
-        if bucketrtn and keyrtn:
-            # Compare the local and remote object
-            if keysum_compare(module, src, s3, bucket, obj):
-                sum_matches = True
-                if overwrite == 'always':
-                    # only use valid object acls for the upload_s3file function
-                    module.params['permission'] = object_acl
-                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
-                else:
-                    get_download_url(module, s3, bucket, obj, expiry, changed=False)
-            else:
-                sum_matches = False
-                if overwrite in ('always', 'different'):
-                    # only use valid object acls for the upload_s3file function
-                    module.params['permission'] = object_acl
-                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
-                else:
-                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
-
-        # If neither exist (based on bucket existence), we can create both.
-        if not bucketrtn:
+        else:
+            # If the bucket doesn't exist we should create it.
             # only use valid bucket acls for create_bucket function
             module.params['permission'] = bucket_acl
             create_bucket(module, s3, bucket, location)
-            # only use valid object acls for the upload_s3file function
-            module.params['permission'] = object_acl
-            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
 
-        # If bucket exists but key doesn't, just upload.
-        if bucketrtn and not keyrtn:
-            # only use valid object acls for the upload_s3file function
-            module.params['permission'] = object_acl
-            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
+        if keyrtn and overwrite != 'always':
+            if overwrite == 'never' or etag_compare(module, src, s3, bucket, obj):
+                # Return the download URL for the existing object
+                get_download_url(module, s3, bucket, obj, expiry, changed=False)
+
+        # only use valid object acls for the upload_s3file function
+        module.params['permission'] = object_acl
+        upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
 
     # Delete an object from a bucket, not the entire bucket
     if mode == 'delobj':

@@ -11,6 +11,15 @@
     no_log: yes
 
 - block:
+    - name: Create temporary directory
+      tempfile:
+        state: directory
+      register: tmpdir
+
+    - name: Create content
+      set_fact:
+        content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}"
+
     - name: test create bucket without permissions
      aws_s3:
         bucket: "{{ bucket_name }}"
@@ -18,8 +27,7 @@
       register: result
       ignore_errors: yes
 
-    - name: assert nice message returned
-      assert:
+    - assert:
         that:
           - result is failed
           - "result.msg != 'MODULE FAILURE'"
@@ -30,10 +38,10 @@
         mode: create
         <<: *aws_connection_info
       register: result
-    - name: assert changed is True
-      assert:
+
+    - assert:
         that:
-          - result.changed == True
+          - result is changed
 
     - name: trying to create a bucket name that already exists
       aws_s3:
@@ -41,54 +49,49 @@
         mode: create
         <<: *aws_connection_info
       register: result
-    - name: assert changed is False since the bucket already exists
-      assert:
-        that:
-          - result.changed == False
 
-    - name: create temporary file object to put in a bucket
-      tempfile:
-      register: tmp1
-    - name: make random contents
-      set_fact:
-        content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}"
+    - assert:
+        that:
+          - result is not changed
 
-    - name: give temporary file data
+    - name: Create local upload.txt
       copy:
         content: "{{ content }}"
-        dest: "{{ tmp1.path }}"
-    - name: get the stat of the file
+        dest: "{{ tmpdir.path }}/upload.txt"
+
+    - name: stat the file
       stat:
-        path: "{{ tmp1.path }}"
+        path: "{{ tmpdir.path }}/upload.txt"
         get_checksum: yes
-      register: file1stat
+      register: upload_file
+
     - name: test putting an object in the bucket
       aws_s3:
         bucket: "{{ bucket_name }}"
         mode: put
-        src: "{{ tmp1.path }}"
+        src: "{{ tmpdir.path }}/upload.txt"
         object: delete.txt
         <<: *aws_connection_info
         retries: 3
         delay: 3
       register: result
-    - name: assert object exists
-      assert:
+
+    - assert:
         that:
-          - result.changed == True
+          - result is changed
           - result.msg == "PUT operation complete"
 
     - name: test using aws_s3 with async
       aws_s3:
         bucket: "{{ bucket_name }}"
         mode: put
-        src: "{{ tmp1.path }}"
+        src: "{{ tmpdir.path }}/upload.txt"
         object: delete.txt
         <<: *aws_connection_info
       register: test_async
       async: 30
       poll: 0
+
     - name: ensure it completed
       async_status:
         jid: "{{ test_async.ansible_job_id }}"
@@ -96,55 +99,176 @@
       until: status is finished
       retries: 10
 
+    - name: test put with overwrite=different and unmodified object
+      aws_s3:
+        bucket: "{{ bucket_name }}"
+        mode: put
+        src: "{{ tmpdir.path }}/upload.txt"
+        object: delete.txt
+        overwrite: different
+        <<: *aws_connection_info
+        retries: 3
+        delay: 3
+      register: result
+
+    - assert:
+        that:
+          - result is not changed
+
     - name: check that roles file lookups work as expected
       aws_s3:
         bucket: "{{ bucket_name }}"
         mode: put
         src: hello.txt
-        object: hello.txt
+        object: delete.txt
         <<: *aws_connection_info
         retries: 3
         delay: 3
       register: result
-    - name: assert object exists
-      assert:
+
+    - assert:
         that:
-          - result.changed == True
+          - result is changed
           - result.msg == "PUT operation complete"
-    - name: remove hello.txt (deletion tests are later)
+
+    - name: test put with overwrite=never
       aws_s3:
         bucket: "{{ bucket_name }}"
-        mode: delobj
-        object: hello.txt
+        mode: put
+        src: "{{ tmpdir.path }}/upload.txt"
+        object: delete.txt
+        overwrite: never
         <<: *aws_connection_info
         retries: 3
         delay: 3
       register: result
 
-    - name: create a second temp file to download the object from the bucket
-      tempfile:
-      register: tmp2
+    - assert:
+        that:
+          - result is not changed
+
+    - name: test put with overwrite=different and modified object
+      aws_s3:
+        bucket: "{{ bucket_name }}"
+        mode: put
+        src: "{{ tmpdir.path }}/upload.txt"
+        object: delete.txt
+        overwrite: different
+        <<: *aws_connection_info
+        retries: 3
+        delay: 3
+      register: result
+
+    - assert:
+        that:
+          - result is changed
+
+    - name: test put with overwrite=always
+      aws_s3:
+        bucket: "{{ bucket_name }}"
+        mode: put
+        src: "{{ tmpdir.path }}/upload.txt"
+        object: delete.txt
+        overwrite: always
+        <<: *aws_connection_info
+        retries: 3
+        delay: 3
+      register: result
+
+    - assert:
+        that:
+          - result is changed
+
     - name: test get object
       aws_s3:
         bucket: "{{ bucket_name }}"
         mode: get
-        dest: "{{ tmp2.path }}"
+        dest: "{{ tmpdir.path }}/download.txt"
         object: delete.txt
         <<: *aws_connection_info
         retries: 3
         delay: 3
       register: result
       until: "result.msg == 'GET operation complete'"
-    - name: get the stat of the file so we can compare the checksums
+
+    - name: stat the file so we can compare the checksums
       stat:
-        path: "{{ tmp2.path }}"
+        path: "{{ tmpdir.path }}/download.txt"
         get_checksum: yes
-      register: file2stat
-    - name: assert checksums are the same
-      assert:
-        that:
-          - file1stat.stat.checksum == file2stat.stat.checksum
+      register: download_file
+
+    - assert:
+        that:
+          - upload_file.stat.checksum == download_file.stat.checksum
+
+    - name: test get with overwrite=different and identical files
+      aws_s3:
+        bucket: "{{ bucket_name }}"
+        mode: get
+        dest: "{{ tmpdir.path }}/download.txt"
+        object: delete.txt
+        overwrite: different
+        <<: *aws_connection_info
+        retries: 3
+        delay: 3
+      register: result
+
+    - assert:
+        that:
+          - result is not changed
+
+    - name: modify destination
+      copy:
+        dest: "{{ tmpdir.path }}/download.txt"
+        src: hello.txt
+
+    - name: test get with overwrite=never
+      aws_s3:
+        bucket: "{{ bucket_name }}"
+        mode: get
+        dest: "{{ tmpdir.path }}/download.txt"
+        object: delete.txt
+        overwrite: never
+        <<: *aws_connection_info
+        retries: 3
+        delay: 3
+      register: result
+
+    - assert:
+        that:
+          - result is not changed
+
+    - name: test get with overwrite=different and modified file
+      aws_s3:
+        bucket: "{{ bucket_name }}"
+        mode: get
+        dest: "{{ tmpdir.path }}/download.txt"
+        object: delete.txt
+        overwrite: different
+        <<: *aws_connection_info
+        retries: 3
+        delay: 3
+      register: result
+
+    - assert:
+        that:
+          - result is changed
+
+    - name: test get with overwrite=always
+      aws_s3:
+        bucket: "{{ bucket_name }}"
+        mode: get
+        dest: "{{ tmpdir.path }}/download.txt"
+        object: delete.txt
+        overwrite: always
+        <<: *aws_connection_info
+        retries: 3
+        delay: 3
+      register: result
+
+    - assert:
+        that:
+          - result is changed
 
     - name: test geturl of the object
       aws_s3:
@@ -155,12 +279,12 @@
         retries: 3
         delay: 3
       register: result
-      until: result.changed
-    - name: assert we have the object's url
-      assert:
+      until: result is changed
+
+    - assert:
         that:
           - "'Download url:' in result.msg"
-          - result.changed == True
+          - result is changed
 
     - name: test getstr of the object
       aws_s3:
@@ -171,8 +295,8 @@
         retries: 3
         delay: 3
       register: result
-    - name: assert that we have the object's contents
-      assert:
+
+    - assert:
         that:
           - result.msg == "GET operation complete"
           - result.contents == content
@@ -185,8 +309,8 @@
         retries: 3
         delay: 3
       register: result
-    - name: assert that the keys are correct
-      assert:
+
+    - assert:
         that:
           - "'delete.txt' in result.s3_keys"
           - result.msg == "LIST operation complete"
@@ -200,61 +324,51 @@
         retries: 3
         delay: 3
       register: result
-    - name: assert that delete.txt is no longer an object in the bucket deleteme
-      assert:
-        that:
-          - "'Object deleted from bucket' in result.msg"
-          - result.changed == True
-    - name: assert that delete.txt is no longer an object in the bucket deleteme
-      assert:
+
+    - assert:
         that:
           - "'Object deleted from bucket' in result.msg"
-          - result.changed == True
-    - name: clean up temp file
-      file:
-        path: "{{ tmp2.path }}"
-        state: absent
+          - result is changed
 
     - name: test putting an encrypted object in the bucket
       aws_s3:
         bucket: "{{ bucket_name }}"
         mode: put
-        src: "{{ tmp1.path }}"
+        src: "{{ tmpdir.path }}/upload.txt"
         encrypt: yes
         object: delete_encrypt.txt
         <<: *aws_connection_info
         retries: 3
         delay: 3
       register: result
-    - name: assert object exists
-      assert:
+
+    - assert:
         that:
-          - result.changed == True
+          - result is changed
           - result.msg == "PUT operation complete"
-    - name: create a second temp file to download the object from the bucket
-      tempfile:
-      register: tmp2
+
     - name: test get encrypted object
       aws_s3:
         bucket: "{{ bucket_name }}"
         mode: get
-        dest: "{{ tmp2.path }}"
+        dest: "{{ tmpdir.path }}/download_encrypted.txt"
         object: delete_encrypt.txt
         <<: *aws_connection_info
         retries: 3
         delay: 3
       register: result
       until: "result.msg == 'GET operation complete'"
-    - name: get the stat of the file so we can compare the checksums
+
+    - name: stat the file so we can compare the checksums
       stat:
-        path: "{{ tmp2.path }}"
+        path: "{{ tmpdir.path }}/download_encrypted.txt"
         get_checksum: yes
-      register: file2stat
-    - name: assert checksums are the same
-      assert:
+      register: download_file
+
+    - assert:
         that:
-          - file1stat.stat.checksum == file2stat.stat.checksum
+          - upload_file.stat.checksum == download_file.stat.checksum
 
     - name: delete encrypted file
       aws_s3:
         bucket: "{{ bucket_name }}"
@@ -263,16 +377,12 @@
         <<: *aws_connection_info
         retries: 3
         delay: 3
-    - name: clean up temp file
-      file:
-        path: "{{ tmp2.path }}"
-        state: absent
 
     - name: test putting an aws:kms encrypted object in the bucket
       aws_s3:
         bucket: "{{ bucket_name }}"
         mode: put
-        src: "{{ tmp1.path }}"
+        src: "{{ tmpdir.path }}/upload.txt"
         encrypt: yes
         encryption_mode: aws:kms
         object: delete_encrypt_kms.txt
@@ -280,46 +390,36 @@
         retries: 3
         delay: 3
       register: result
-    - name: assert object exists
-      assert:
+
+    - assert:
         that:
-          - result.changed == True
+          - result is changed
           - result.msg == "PUT operation complete"
-    - name: create a second temp file to download the object from the bucket
-      tempfile:
-      register: tmp2
+
     - name: test get KMS encrypted object
       aws_s3:
         bucket: "{{ bucket_name }}"
         mode: get
-        dest: "{{ tmp2.path }}"
+        dest: "{{ tmpdir.path }}/download_kms.txt"
         object: delete_encrypt_kms.txt
         <<: *aws_connection_info
         retries: 3
         delay: 3
       register: result
       until: "result.msg == 'GET operation complete'"
+
     - name: get the stat of the file so we can compare the checksums
       stat:
-        path: "{{ tmp2.path }}"
+        path: "{{ tmpdir.path }}/download_kms.txt"
         get_checksum: yes
-      register: file2stat
-    - name: assert checksums are the same
-      assert:
+      register: download_file
+
+    - assert:
         that:
-          - file1stat.stat.checksum == file2stat.stat.checksum
+          - upload_file.stat.checksum == download_file.stat.checksum
 
     # FIXME - could use a test that checks uploaded file is *actually* aws:kms encrypted
 
-    - name: test get KMS encrypted object using v4 signature
-      aws_s3:
-        bucket: "{{ bucket_name }}"
-        mode: get
-        dest: "{{ tmp2.path }}"
-        object: delete_encrypt_kms.txt
-        <<: *aws_connection_info
-        retries: 3
-        delay: 3
-      until: "result.msg == 'GET operation complete'"
-
     - name: delete KMS encrypted file
       aws_s3:
         bucket: "{{ bucket_name }}"
@@ -328,10 +428,6 @@
         <<: *aws_connection_info
         retries: 3
         delay: 3
-    - name: clean up temp file
-      file:
-        path: "{{ tmp2.path }}"
-        state: absent
 
     # FIXME: could use a test that checks non standard KMS key
     # but that would require ability to create and remove such keys.
@@ -346,11 +442,12 @@
         retries: 3
         delay: 3
       register: result
-    - name: assert that empty path is created
-      assert:
+
+    - assert:
         that:
           - "'Virtual directory foo/bar/baz/ created' in result.msg"
-          - result.changed == True
+          - result is changed
 
     - name: test deletion of empty path
       aws_s3:
         bucket: "{{ bucket_name }}"
@@ -368,11 +465,11 @@
       register: result
       retries: 3
       delay: 3
-      until: result.changed
-    - name: assert that changed is True
-      assert:
+      until: result is changed
+
+    - assert:
         that:
-          - result.changed == True
+          - result is changed
 
     - name: test create a bucket with a dot in the name
       aws_s3:
@@ -380,10 +477,10 @@
         mode: create
         <<: *aws_connection_info
       register: result
-    - name: assert that changed is True
-      assert:
+
+    - assert:
         that:
-          - result.changed == True
+          - result is changed
 
     - name: test delete a bucket with a dot in the name
       aws_s3:
@@ -391,10 +488,10 @@
         mode: delete
         <<: *aws_connection_info
       register: result
-    - name: assert that changed is True
-      assert:
+
+    - assert:
         that:
-          - result.changed == True
+          - result is changed
 
     - name: test delete a nonexistent bucket
       aws_s3:
@@ -402,19 +499,19 @@
         mode: delete
         <<: *aws_connection_info
       register: result
-    - name: assert that changed is False
-      assert:
+
+    - assert:
         that:
-          - result.changed == False
+          - result is not changed
 
     - name: make tempfile 4 GB for OSX
       command:
-        _raw_params: "dd if=/dev/zero of={{ tmp1.path }} bs=1m count=4096"
+        _raw_params: "dd if=/dev/zero of={{ tmpdir.path }}/largefile bs=1m count=4096"
       when: ansible_distribution == 'MacOSX'
 
     - name: make tempfile 4 GB for linux
       command:
-        _raw_params: "dd if=/dev/zero of={{ tmp1.path }} bs=1M count=4096"
+        _raw_params: "dd if=/dev/zero of={{ tmpdir.path }}/largefile bs=1M count=4096"
       when: ansible_system == 'Linux'
 
     - name: test multipart download - platform specific
@@ -429,7 +526,7 @@
           aws_s3:
             bucket: "{{ bucket_name }}"
             mode: put
-            src: "{{ tmp1.path }}"
+            src: "{{ tmpdir.path }}/largefile"
             object: multipart.txt
             <<: *aws_connection_info
@@ -437,7 +534,7 @@
           aws_s3:
             bucket: "{{ bucket_name }}"
             mode: get
-            dest: "{{ tmp2.path }}"
+            dest: "{{ tmpdir.path }}/download.txt"
             object: multipart.txt
             overwrite: different
             <<: *aws_connection_info
@@ -446,53 +543,43 @@
           until: "result.msg == 'GET operation complete'"
           register: result
 
-        - name: assert the file was downloaded once
-          assert:
+        - assert:
             that:
-              - result.changed
+              - result is changed
 
         - name: download file again
           aws_s3:
             bucket: "{{ bucket_name }}"
             mode: get
-            dest: "{{ tmp2.path }}"
+            dest: "{{ tmpdir.path }}/download.txt"
            object: multipart.txt
             overwrite: different
             <<: *aws_connection_info
           register: result
 
-        - name: assert the file was not redownloaded
-          assert:
+        - assert:
             that:
-              - not result.changed
+              - result is not changed
       when: ansible_system == 'Linux' or ansible_distribution == 'MacOSX'
 
   always:
-    ###### TEARDOWN STARTS HERE ######
     - name: remove uploaded files
       aws_s3:
         bucket: "{{ bucket_name }}"
         mode: delobj
         object: "{{ item }}"
         <<: *aws_connection_info
-      with_items:
+      loop:
         - hello.txt
         - delete.txt
         - delete_encrypt.txt
         - delete_encrypt_kms.txt
       ignore_errors: yes
 
-    - name: delete temporary file 1
-      file:
-        state: absent
-        path: "{{ tmp1.path }}"
-      ignore_errors: yes
-    - name: delete temporary file 2
+    - name: delete temporary files
       file:
         state: absent
-        path: "{{ tmp2.path }}"
+        path: "{{ tmpdir.path }}"
       ignore_errors: yes
 
     - name: delete the bucket
