aws_s3: Improve ETag handling (#46332)

* cosmetic: Remove useless call to ec2_argument_spec()

* aws_s3: Improve ETag handling

* Extract ETag calculation into a utility function for reuse by
  aws_s3_sync.
* Reduce code duplication in put/get by restructuring the logic.
* Only calculate the ETag when overwrite == different.
* Fail gracefully when overwrite == different and MD5 isn't available
  (e.g. due to FIPS-140-2).

* aws_s3: clean up integration tests

Clean up the tests and add tests for the overwrite settings in both directions.
flowerysong (committed by Jill R)
parent 0e7e3c0ae8
commit c513c1e2d3

@@ -0,0 +1,2 @@
bugfixes:
- aws_s3 - Improve usability when the execution host lacks MD5 support (e.g. due to FIPS-140-2).

@@ -0,0 +1,47 @@
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

try:
    from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
    pass  # Handled by the calling module

HAS_MD5 = True
try:
    from hashlib import md5
except ImportError:
    try:
        from md5 import md5
    except ImportError:
        HAS_MD5 = False


def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
    if not HAS_MD5:
        return None

    if '-' in etag:
        # Multi-part ETag; a hash of the hashes of each part.
        parts = int(etag[1:-1].split('-')[1])
        digests = []

        s3_kwargs = dict(
            Bucket=bucket,
            Key=obj,
        )
        if version:
            s3_kwargs['VersionId'] = version

        with open(filename, 'rb') as f:
            for part_num in range(1, parts + 1):
                s3_kwargs['PartNumber'] = part_num
                try:
                    head = s3.head_object(**s3_kwargs)
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Failed to get head object")
                digests.append(md5(f.read(int(head['ContentLength']))))

        digest_squared = md5(b''.join(m.digest() for m in digests))
        return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
    else:  # Compute the MD5 sum normally
        return '"{0}"'.format(module.md5(filename))

@@ -297,14 +297,14 @@ s3_keys:
- prefix1/key2
'''
import hashlib
import mimetypes
import os
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ssl import SSLError
from ansible.module_utils.basic import to_text, to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn
from ansible.module_utils.aws.s3 import calculate_etag, HAS_MD5
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
try:
import botocore
@@ -340,42 +340,21 @@ def key_check(module, s3, bucket, obj, version=None, validate=True):
return exists
def keysum_compare(module, local_file, s3, bucket, obj, version=None):
s3_keysum = keysum(s3, bucket, obj, version=version)
if '-' in s3_keysum: # Check for multipart, ETag is not a proper MD5 sum
parts = int(s3_keysum.split('-')[1])
md5s = []
def etag_compare(module, local_file, s3, bucket, obj, version=None):
s3_etag = get_etag(s3, bucket, obj, version=version)
local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version)
with open(local_file, 'rb') as f:
for part_num in range(1, parts + 1):
# Get the part size for every part of the multipart uploaded object
if version:
key_head = s3.head_object(Bucket=bucket, Key=obj, VersionId=version, PartNumber=part_num)
else:
key_head = s3.head_object(Bucket=bucket, Key=obj, PartNumber=part_num)
part_size = int(key_head['ContentLength'])
data = f.read(part_size)
hash = hashlib.md5(data)
md5s.append(hash)
digests = b''.join(m.digest() for m in md5s)
digests_md5 = hashlib.md5(digests)
local_keysum = '{0}-{1}'.format(digests_md5.hexdigest(), len(md5s))
else: # Compute the MD5 sum normally
local_keysum = module.md5(local_file)
return s3_keysum == local_keysum
return s3_etag == local_etag
def keysum(s3, bucket, obj, version=None):
def get_etag(s3, bucket, obj, version=None):
if version:
key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
else:
key_check = s3.head_object(Bucket=bucket, Key=obj)
if not key_check:
return None
md5_remote = key_check['ETag'][1:-1]
return md5_remote
return key_check['ETag']
def bucket_check(module, s3, bucket, validate=True):
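
Note the quoting convention above: get_etag now returns the ETag exactly as S3 reports it, surrounding double quotes included, and calculate_etag formats its results the same way, so etag_compare compares like with like. A quick illustration (the hash value is made up):

    # Both sides keep S3's surrounding quotes, so no stripping is needed.
    remote_etag = '"5d41402abc4b2a76b9719d911017c592"'   # as returned by get_etag()
    local_etag = '"5d41402abc4b2a76b9719d911017c592"'    # as returned by calculate_etag()
    assert remote_etag == local_etag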
@@ -670,32 +649,29 @@ def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=F
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
bucket=dict(required=True),
dest=dict(default=None, type='path'),
encrypt=dict(default=True, type='bool'),
encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
expiry=dict(default=600, type='int', aliases=['expiration']),
headers=dict(type='dict'),
marker=dict(default=""),
max_keys=dict(default=1000, type='int'),
metadata=dict(type='dict'),
mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
object=dict(),
permission=dict(type='list', default=['private']),
version=dict(default=None),
overwrite=dict(aliases=['force'], default='always'),
prefix=dict(default=""),
retries=dict(aliases=['retry'], type='int', default=0),
s3_url=dict(aliases=['S3_URL']),
dualstack=dict(default='no', type='bool'),
rgw=dict(default='no', type='bool'),
src=dict(),
ignore_nonexistent_bucket=dict(default=False, type='bool'),
encryption_kms_key_id=dict()
),
argument_spec = dict(
bucket=dict(required=True),
dest=dict(default=None, type='path'),
encrypt=dict(default=True, type='bool'),
encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
expiry=dict(default=600, type='int', aliases=['expiration']),
headers=dict(type='dict'),
marker=dict(default=""),
max_keys=dict(default=1000, type='int'),
metadata=dict(type='dict'),
mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
object=dict(),
permission=dict(type='list', default=['private']),
version=dict(default=None),
overwrite=dict(aliases=['force'], default='always'),
prefix=dict(default=""),
retries=dict(aliases=['retry'], type='int', default=0),
s3_url=dict(aliases=['S3_URL']),
dualstack=dict(default='no', type='bool'),
rgw=dict(default='no', type='bool'),
src=dict(),
ignore_nonexistent_bucket=dict(default=False, type='bool'),
encryption_kms_key_id=dict()
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
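
For reference, the ec2_argument_spec() call could be dropped because AnsibleAWSModule merges the common AWS arguments (region, credentials, profile, and so on) into the spec itself. A minimal sketch of the pattern (example_param is illustrative):

    from ansible.module_utils.aws.core import AnsibleAWSModule

    # AnsibleAWSModule injects the shared AWS argument spec, so a module
    # only declares its own parameters.
    argument_spec = dict(
        example_param=dict(type='str'),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec)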
@@ -735,6 +711,9 @@ def main():
else:
overwrite = 'never'
if overwrite == 'different' and not HAS_MD5:
module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if region in ('us-east-1', '', None):
@@ -792,9 +771,7 @@ def main():
if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
module.fail_json(msg="Source bucket cannot be found.")
# If our mode is a GET operation (download), go through the procedure as appropriate ...
if mode == 'get':
# Next, we check whether the key in the bucket exists.
keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
if keyrtn is False:
if version:
@@ -802,86 +779,42 @@
else:
module.fail_json(msg="Key %s does not exist." % obj)
# If the destination path doesn't exist or overwrite is True, no need to do the md5sum ETag check, so just download.
# Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
if path_check(dest):
# Determine if the remote and local object are identical
if keysum_compare(module, dest, s3, bucket, obj, version=version):
sum_matches = True
if overwrite == 'always':
try:
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
except Sigv4Required:
s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
else:
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
else:
sum_matches = False
if overwrite in ('always', 'different'):
try:
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
except Sigv4Required:
s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
else:
try:
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
except Sigv4Required:
s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
if path_check(dest) and overwrite != 'always':
if overwrite == 'never':
module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
if etag_compare(module, dest, s3, bucket, obj, version=version):
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
try:
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
except Sigv4Required:
s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
# if our mode is a PUT operation (upload), go through the procedure as appropriate ...
if mode == 'put':
# if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
# these were separated into the variables bucket_acl and object_acl above
# Lets check the src path.
if not path_check(src):
module.fail_json(msg="Local object for PUT does not exist")
# Lets check to see if bucket exists to get ground truth.
if bucketrtn:
keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
# Lets check key state. Does it exist and if it does, compute the ETag md5sum.
if bucketrtn and keyrtn:
# Compare the local and remote object
if keysum_compare(module, src, s3, bucket, obj):
sum_matches = True
if overwrite == 'always':
# only use valid object acls for the upload_s3file function
module.params['permission'] = object_acl
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
else:
get_download_url(module, s3, bucket, obj, expiry, changed=False)
else:
sum_matches = False
if overwrite in ('always', 'different'):
# only use valid object acls for the upload_s3file function
module.params['permission'] = object_acl
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
# If neither exist (based on bucket existence), we can create both.
if not bucketrtn:
else:
# If the bucket doesn't exist we should create it.
# only use valid bucket acls for create_bucket function
module.params['permission'] = bucket_acl
create_bucket(module, s3, bucket, location)
# only use valid object acls for the upload_s3file function
module.params['permission'] = object_acl
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
# If bucket exists but key doesn't, just upload.
if bucketrtn and not keyrtn:
# only use valid object acls for the upload_s3file function
module.params['permission'] = object_acl
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
if keyrtn and overwrite != 'always':
if overwrite == 'never' or etag_compare(module, src, s3, bucket, obj):
# Return the download URL for the existing object
get_download_url(module, s3, bucket, obj, expiry, changed=False)
# only use valid object acls for the upload_s3file function
module.params['permission'] = object_acl
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
# Delete an object from a bucket, not the entire bucket
if mode == 'delobj':
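
A condensed sketch (not the module's literal code) of the overwrite handling the restructured get and put paths above now share; passing the comparison as a callable captures why the ETag is only calculated when overwrite=different, which in turn is why the early MD5 availability check in main() suffices:

    def should_transfer(overwrite, destination_exists, etags_match):
        # 'always', or nothing to collide with: transfer unconditionally.
        if not destination_exists or overwrite == 'always':
            return True
        # 'never': keep whatever already exists.
        if overwrite == 'never':
            return False
        # 'different': transfer only when the ETags disagree. etags_match
        # is a callable so the MD5-based comparison runs only on this branch.
        return not etags_match()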

@@ -11,6 +11,15 @@
no_log: yes
- block:
- name: Create temporary directory
tempfile:
state: directory
register: tmpdir
- name: Create content
set_fact:
content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}"
- name: test create bucket without permissions
aws_s3:
bucket: "{{ bucket_name }}"
@@ -18,8 +27,7 @@
register: result
ignore_errors: yes
- name: assert nice message returned
assert:
- assert:
that:
- result is failed
- "result.msg != 'MODULE FAILURE'"
@@ -30,10 +38,10 @@
mode: create
<<: *aws_connection_info
register: result
- name: assert changed is True
assert:
- assert:
that:
- result.changed == True
- result is changed
- name: trying to create a bucket name that already exists
aws_s3:
@@ -41,54 +49,49 @@
mode: create
<<: *aws_connection_info
register: result
- name: assert changed is False since the bucket already exists
assert:
that:
- result.changed == False
- name: create temporary file object to put in a bucket
tempfile:
register: tmp1
- name: make random contents
set_fact:
content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}"
- assert:
that:
- result is not changed
- name: give temporary file data
- name: Create local upload.txt
copy:
content: "{{ content }}"
dest: "{{ tmp1.path }}"
- name: get the stat of the file
dest: "{{ tmpdir.path }}/upload.txt"
- name: stat the file
stat:
path: "{{ tmp1.path }}"
path: "{{ tmpdir.path }}/upload.txt"
get_checksum: yes
register: file1stat
register: upload_file
- name: test putting an object in the bucket
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmp1.path }}"
src: "{{ tmpdir.path }}/upload.txt"
object: delete.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- name: assert object exists
assert:
- assert:
that:
- result.changed == True
- result is changed
- result.msg == "PUT operation complete"
- name: test using aws_s3 with async
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmp1.path }}"
src: "{{ tmpdir.path }}/upload.txt"
object: delete.txt
<<: *aws_connection_info
register: test_async
async: 30
poll: 0
- name: ensure it completed
async_status:
jid: "{{ test_async.ansible_job_id }}"
@@ -96,55 +99,176 @@
until: status is finished
retries: 10
- name: test put with overwrite=different and unmodified object
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmpdir.path }}/upload.txt"
object: delete.txt
overwrite: different
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is not changed
- name: check that roles file lookups work as expected
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: hello.txt
object: hello.txt
object: delete.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- name: assert object exists
assert:
- assert:
that:
- result.changed == True
- result is changed
- result.msg == "PUT operation complete"
- name: remove hello.txt (deletion tests are later)
- name: test put with overwrite=never
aws_s3:
bucket: "{{ bucket_name }}"
mode: delobj
object: hello.txt
mode: put
src: "{{ tmpdir.path }}/upload.txt"
object: delete.txt
overwrite: never
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- name: create a second temp file to download the object from the bucket
tempfile:
register: tmp2
- assert:
that:
- result is not changed
- name: test put with overwrite=different and modified object
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmpdir.path }}/upload.txt"
object: delete.txt
overwrite: different
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is changed
- name: test put with overwrite=always
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmpdir.path }}/upload.txt"
object: delete.txt
overwrite: always
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is changed
- name: test get object
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmp2.path }}"
dest: "{{ tmpdir.path }}/download.txt"
object: delete.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
until: "result.msg == 'GET operation complete'"
- name: get the stat of the file so we can compare the checksums
- name: stat the file so we can compare the checksums
stat:
path: "{{ tmp2.path }}"
path: "{{ tmpdir.path }}/download.txt"
get_checksum: yes
register: file2stat
- name: assert checksums are the same
assert:
register: download_file
- assert:
that:
- upload_file.stat.checksum == download_file.stat.checksum
- name: test get with overwrite=different and identical files
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download.txt"
object: delete.txt
overwrite: different
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is not changed
- name: modify destination
copy:
dest: "{{ tmpdir.path }}/download.txt"
src: hello.txt
- name: test get with overwrite=never
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download.txt"
object: delete.txt
overwrite: never
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- file1stat.stat.checksum == file2stat.stat.checksum
- result is not changed
- name: test get with overwrite=different and modified file
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download.txt"
object: delete.txt
overwrite: different
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is changed
- name: test get with overwrite=always
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download.txt"
object: delete.txt
overwrite: always
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is changed
- name: test geturl of the object
aws_s3:
@@ -155,12 +279,12 @@
retries: 3
delay: 3
register: result
until: result.changed
- name: assert we have the object's url
assert:
until: result is changed
- assert:
that:
- "'Download url:' in result.msg"
- result.changed == True
- result is changed
- name: test getstr of the object
aws_s3:
@@ -171,8 +295,8 @@
retries: 3
delay: 3
register: result
- name: assert that we have the object's contents
assert:
- assert:
that:
- result.msg == "GET operation complete"
- result.contents == content
@@ -185,8 +309,8 @@
retries: 3
delay: 3
register: result
- name: assert that the keys are correct
assert:
- assert:
that:
- "'delete.txt' in result.s3_keys"
- result.msg == "LIST operation complete"
@@ -200,61 +324,51 @@
retries: 3
delay: 3
register: result
- name: assert that delete.txt is no longer an object in the bucket deleteme
assert:
that:
- "'Object deleted from bucket' in result.msg"
- result.changed == True
- name: assert that delete.txt is no longer an object in the bucket deleteme
assert:
- assert:
that:
- "'Object deleted from bucket' in result.msg"
- result.changed == True
- name: clean up temp file
file:
path: "{{ tmp2.path }}"
state: absent
- result is changed
- name: test putting an encrypted object in the bucket
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmp1.path }}"
src: "{{ tmpdir.path }}/upload.txt"
encrypt: yes
object: delete_encrypt.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- name: assert object exists
assert:
- assert:
that:
- result.changed == True
- result is changed
- result.msg == "PUT operation complete"
- name: create a second temp file to download the object from the bucket
tempfile:
register: tmp2
- name: test get encrypted object
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmp2.path }}"
dest: "{{ tmpdir.path }}/download_encrypted.txt"
object: delete_encrypt.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
until: "result.msg == 'GET operation complete'"
- name: get the stat of the file so we can compare the checksums
- name: stat the file so we can compare the checksums
stat:
path: "{{ tmp2.path }}"
path: "{{ tmpdir.path }}/download_encrypted.txt"
get_checksum: yes
register: file2stat
- name: assert checksums are the same
assert:
register: download_file
- assert:
that:
- file1stat.stat.checksum == file2stat.stat.checksum
- upload_file.stat.checksum == download_file.stat.checksum
- name: delete encrypted file
aws_s3:
bucket: "{{ bucket_name }}"
@@ -263,16 +377,12 @@
<<: *aws_connection_info
retries: 3
delay: 3
- name: clean up temp file
file:
path: "{{ tmp2.path }}"
state: absent
- name: test putting an aws:kms encrypted object in the bucket
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmp1.path }}"
src: "{{ tmpdir.path }}/upload.txt"
encrypt: yes
encryption_mode: aws:kms
object: delete_encrypt_kms.txt
@@ -280,46 +390,36 @@
retries: 3
delay: 3
register: result
- name: assert object exists
assert:
- assert:
that:
- result.changed == True
- result is changed
- result.msg == "PUT operation complete"
- name: create a second temp file to download the object from the bucket
tempfile:
register: tmp2
- name: test get KMS encrypted object
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmp2.path }}"
dest: "{{ tmpdir.path }}/download_kms.txt"
object: delete_encrypt_kms.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
until: "result.msg == 'GET operation complete'"
- name: get the stat of the file so we can compare the checksums
stat:
path: "{{ tmp2.path }}"
path: "{{ tmpdir.path }}/download_kms.txt"
get_checksum: yes
register: file2stat
- name: assert checksums are the same
assert:
register: download_file
- assert:
that:
- file1stat.stat.checksum == file2stat.stat.checksum
- upload_file.stat.checksum == download_file.stat.checksum
# FIXME - could use a test that checks uploaded file is *actually* aws:kms encrypted
- name: test get KMS encrypted object using v4 signature
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmp2.path }}"
object: delete_encrypt_kms.txt
<<: *aws_connection_info
retries: 3
delay: 3
until: "result.msg == 'GET operation complete'"
- name: delete KMS encrypted file
aws_s3:
bucket: "{{ bucket_name }}"
@@ -328,10 +428,6 @@
<<: *aws_connection_info
retries: 3
delay: 3
- name: clean up temp file
file:
path: "{{ tmp2.path }}"
state: absent
# FIXME: could use a test that checks non standard KMS key
# but that would require ability to create and remove such keys.
@@ -346,11 +442,12 @@
retries: 3
delay: 3
register: result
- name: assert that empty path is created
assert:
- assert:
that:
- "'Virtual directory foo/bar/baz/ created' in result.msg"
- result.changed == True
- result is changed
- name: test deletion of empty path
aws_s3:
bucket: "{{ bucket_name }}"
@@ -368,11 +465,11 @@
register: result
retries: 3
delay: 3
until: result.changed
- name: assert that changed is True
assert:
until: result is changed
- assert:
that:
- result.changed == True
- result is changed
- name: test create a bucket with a dot in the name
aws_s3:
@@ -380,10 +477,10 @@
mode: create
<<: *aws_connection_info
register: result
- name: assert that changed is True
assert:
- assert:
that:
- result.changed == True
- result is changed
- name: test delete a bucket with a dot in the name
aws_s3:
@@ -391,10 +488,10 @@
mode: delete
<<: *aws_connection_info
register: result
- name: assert that changed is True
assert:
- assert:
that:
- result.changed == True
- result is changed
- name: test delete a nonexistent bucket
aws_s3:
@@ -402,19 +499,19 @@
mode: delete
<<: *aws_connection_info
register: result
- name: assert that changed is False
assert:
- assert:
that:
- result.changed == False
- result is not changed
- name: make tempfile 4 GB for OSX
command:
_raw_params: "dd if=/dev/zero of={{ tmp1.path }} bs=1m count=4096"
_raw_params: "dd if=/dev/zero of={{ tmpdir.path }}/largefile bs=1m count=4096"
when: ansible_distribution == 'MacOSX'
- name: make tempfile 4 GB for linux
command:
_raw_params: "dd if=/dev/zero of={{ tmp1.path }} bs=1M count=4096"
_raw_params: "dd if=/dev/zero of={{ tmpdir.path }}/largefile bs=1M count=4096"
when: ansible_system == 'Linux'
- name: test multipart download - platform specific
@@ -429,7 +526,7 @@
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmp1.path }}"
src: "{{ tmpdir.path }}/largefile"
object: multipart.txt
<<: *aws_connection_info
@@ -437,7 +534,7 @@
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmp2.path }}"
dest: "{{ tmpdir.path }}/download.txt"
object: multipart.txt
overwrite: different
<<: *aws_connection_info
@@ -446,53 +543,43 @@
until: "result.msg == 'GET operation complete'"
register: result
- name: assert the file was downloaded once
assert:
- assert:
that:
- result.changed
- result is changed
- name: download file again
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmp2.path }}"
dest: "{{ tmpdir.path }}/download.txt"
object: multipart.txt
overwrite: different
<<: *aws_connection_info
register: result
- name: assert the file was not redownloaded
assert:
- assert:
that:
- not result.changed
- result is not changed
when: ansible_system == 'Linux' or ansible_distribution == 'MacOSX'
always:
###### TEARDOWN STARTS HERE ######
- name: remove uploaded files
aws_s3:
bucket: "{{ bucket_name }}"
mode: delobj
object: "{{ item }}"
<<: *aws_connection_info
with_items:
loop:
- hello.txt
- delete.txt
- delete_encrypt.txt
- delete_encrypt_kms.txt
ignore_errors: yes
- name: delete temporary file 1
file:
state: absent
path: "{{ tmp1.path }}"
ignore_errors: yes
- name: delete temporary file 2
- name: delete temporary files
file:
state: absent
path: "{{ tmp2.path }}"
path: "{{ tmpdir.path }}"
ignore_errors: yes
- name: delete the bucket
