S3 versioned lifecycle (#40161)

* add tests for s3_lifecycle
* fix a bug comparing transitions with different storage_types
* make s3_lifecycle work with boto3
* add noncurrent version lifecycle rules
Ben Berry 6 years ago committed by Sloane Hertel
parent ca3390377a
commit 6a4f3fb729
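For context on the comparison bug mentioned above: S3 returns a rule's transitions as a list, and list equality is order-sensitive, so two rules carrying the same transitions for different storage classes could spuriously compare unequal (or equal). A minimal standalone sketch of an order-insensitive check in the spirit of the fix (illustrative names only, not code from this commit):

# Illustrative only: compare transition lists irrespective of ordering.
def transitions_equal(a, b):
    # S3 allows at most one transition per storage class, so membership
    # checks plus a length check are enough; no sorting key is needed.
    return len(a) == len(b) and all(t in b for t in a)

rule1 = [{'Days': 30, 'StorageClass': 'STANDARD_IA'},
         {'Days': 90, 'StorageClass': 'GLACIER'}]
rule2 = [{'Days': 90, 'StorageClass': 'GLACIER'},
         {'Days': 30, 'StorageClass': 'STANDARD_IA'}]
assert transitions_equal(rule1, rule2)  # same transitions, different order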

@@ -40,6 +40,40 @@ options:
   prefix:
     description:
       - "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket."
+  purge_transitions:
+    description:
+      - >
+        Whether to replace all the current transition(s) with the new transition(s). When false, the provided transition(s)
+        will be added, replacing transitions with the same storage_class. When true, existing transitions will be removed
+        and replaced with the new transition(s).
+    default: true
+    type: bool
+    version_added: 2.6
+  noncurrent_version_expiration_days:
+    description:
+      - Delete noncurrent versions this many days after they become noncurrent.
+    required: false
+    version_added: 2.6
+  noncurrent_version_storage_class:
+    description:
+      - Transition noncurrent versions to this storage class.
+    default: glacier
+    choices: ['glacier', 'onezone_ia', 'standard_ia']
+    required: false
+    version_added: 2.6
+  noncurrent_version_transition_days:
+    description:
+      - Transition noncurrent versions this many days after they become noncurrent.
+    required: false
+    version_added: 2.6
+  noncurrent_version_transitions:
+    description:
+      - >
+        A list of transition behaviors to be applied to noncurrent versions for the rule. Each storage class may be used
+        only once. Each transition behavior contains these elements:
+        I(transition_days)
+        I(storage_class)
+    version_added: 2.6
   rule_id:
     description:
       - "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided."
@@ -55,10 +89,10 @@ options:
     choices: [ 'enabled', 'disabled' ]
   storage_class:
     description:
-      - "The storage class to transition to. Currently there are two supported values - 'glacier' or 'standard_ia'."
+      - "The storage class to transition to. Currently there are three supported values - 'glacier', 'onezone_ia', or 'standard_ia'."
       - "The 'standard_ia' class is only available from Ansible version 2.2."
     default: glacier
-    choices: [ 'glacier', 'standard_ia']
+    choices: [ 'glacier', 'onezone_ia', 'standard_ia']
   transition_date:
     description:
       - >
@@ -68,6 +102,14 @@ options:
   transition_days:
     description:
       - "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
+  transitions:
+    description:
+      - A list of transition behaviors to be applied to the rule. Each storage class may be used only once. Each transition
+        behavior may contain these elements:
+          I(transition_days)
+          I(transition_date)
+          I(storage_class)
+    version_added: 2.6
 extends_documentation_fragment:
   - aws
   - ec2
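For reference, the transitions option maps directly onto a Rules entry of the S3 PutBucketLifecycleConfiguration API. A sketch of the rule dict such a list becomes (bucket prefix and rule ID are hypothetical; S3 generates an ID when none is given):

# Sketch of the boto3 rule a `transitions` list turns into (illustrative values).
rule = {
    'ID': 'transition-logs',            # optional; generated if omitted
    'Filter': {'Prefix': '/logs/'},
    'Status': 'Enabled',
    'Transitions': [
        {'Days': 30, 'StorageClass': 'STANDARD_IA'},
        {'Days': 90, 'StorageClass': 'GLACIER'},
    ],
}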
@@ -126,10 +168,20 @@ EXAMPLES = '''
     state: present
     status: enabled

+# Configure a lifecycle rule to transition files to infrequent access after 30 days and glacier after 90
+- s3_lifecycle:
+    name: mybucket
+    prefix: /logs/
+    state: present
+    status: enabled
+    transitions:
+      - transition_days: 30
+        storage_class: standard_ia
+      - transition_days: 90
+        storage_class: glacier
 '''

-import xml.etree.ElementTree as ET
-import copy
+from copy import deepcopy
 import datetime

 try:
@@ -139,157 +191,186 @@ except ImportError:
     HAS_DATEUTIL = False

 try:
-    import boto
-    import boto.ec2
-    from boto.s3.connection import OrdinaryCallingFormat, Location
-    from boto.s3.lifecycle import Lifecycle, Rule, Expiration, Transition
-    from boto.exception import BotoServerError, S3ResponseError
-    HAS_BOTO = True
+    from botocore.exceptions import BotoCoreError, ClientError
 except ImportError:
-    HAS_BOTO = False
+    pass  # handled by AnsibleAWSModule

-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info
+from ansible.module_utils.aws.core import AnsibleAWSModule


-def create_lifecycle_rule(connection, module):
+def create_lifecycle_rule(client, module):

     name = module.params.get("name")
     expiration_date = module.params.get("expiration_date")
     expiration_days = module.params.get("expiration_days")
+    noncurrent_version_expiration_days = module.params.get("noncurrent_version_expiration_days")
+    noncurrent_version_transition_days = module.params.get("noncurrent_version_transition_days")
+    noncurrent_version_transitions = module.params.get("noncurrent_version_transitions")
+    noncurrent_version_storage_class = module.params.get("noncurrent_version_storage_class")
     prefix = module.params.get("prefix")
     rule_id = module.params.get("rule_id")
     status = module.params.get("status")
     storage_class = module.params.get("storage_class")
     transition_date = module.params.get("transition_date")
     transition_days = module.params.get("transition_days")
+    transitions = module.params.get("transitions")
+    purge_transitions = module.params.get("purge_transitions")
     changed = False

-    try:
-        bucket = connection.get_bucket(name)
-    except S3ResponseError as e:
-        module.fail_json(msg=e.message)
-
     # Get the bucket's current lifecycle rules
     try:
-        current_lifecycle_obj = bucket.get_lifecycle_config()
-    except S3ResponseError as e:
-        if e.error_code == "NoSuchLifecycleConfiguration":
-            current_lifecycle_obj = Lifecycle()
+        current_lifecycle = client.get_bucket_lifecycle_configuration(Bucket=name)
+        current_lifecycle_rules = current_lifecycle['Rules']
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'NoSuchLifecycleConfiguration':
+            current_lifecycle_rules = []
         else:
-            module.fail_json(msg=e.message)
+            module.fail_json_aws(e)
+    except BotoCoreError as e:
+        module.fail_json_aws(e)

+    rule = dict(Filter=dict(Prefix=prefix), Status=status.title())
+    if rule_id is not None:
+        rule['ID'] = rule_id
     # Create expiration
     if expiration_days is not None:
-        expiration_obj = Expiration(days=expiration_days)
+        rule['Expiration'] = dict(Days=expiration_days)
     elif expiration_date is not None:
-        expiration_obj = Expiration(date=expiration_date)
-    else:
-        expiration_obj = None
+        rule['Expiration'] = dict(Date=expiration_date)

-    # Create transition
+    if noncurrent_version_expiration_days is not None:
+        rule['NoncurrentVersionExpiration'] = dict(NoncurrentDays=noncurrent_version_expiration_days)
+
     if transition_days is not None:
-        transition_obj = Transition(days=transition_days, storage_class=storage_class.upper())
+        rule['Transitions'] = [dict(Days=transition_days, StorageClass=storage_class.upper()), ]
     elif transition_date is not None:
-        transition_obj = Transition(date=transition_date, storage_class=storage_class.upper())
-    else:
-        transition_obj = None
+        rule['Transitions'] = [dict(Date=transition_date, StorageClass=storage_class.upper()), ]

-    # Create rule
-    rule = Rule(rule_id, prefix, status.title(), expiration_obj, transition_obj)
-
-    # Create lifecycle
-    lifecycle_obj = Lifecycle()
+    if transitions is not None:
+        if not rule.get('Transitions'):
+            rule['Transitions'] = []
+        for transition in transitions:
+            t_out = dict()
+            if transition.get('transition_date'):
+                t_out['Date'] = transition['transition_date']
+            elif transition.get('transition_days'):
+                t_out['Days'] = transition['transition_days']
+            if transition.get('storage_class'):
+                t_out['StorageClass'] = transition['storage_class'].upper()
+            rule['Transitions'].append(t_out)
+
+    if noncurrent_version_transition_days is not None:
+        rule['NoncurrentVersionTransitions'] = [dict(NoncurrentDays=noncurrent_version_transition_days,
+                                                     StorageClass=noncurrent_version_storage_class.upper()), ]
+
+    if noncurrent_version_transitions is not None:
+        if not rule.get('NoncurrentVersionTransitions'):
+            rule['NoncurrentVersionTransitions'] = []
+        for noncurrent_version_transition in noncurrent_version_transitions:
+            t_out = dict()
+            t_out['NoncurrentDays'] = noncurrent_version_transition['transition_days']
+            if noncurrent_version_transition.get('storage_class'):
+                t_out['StorageClass'] = noncurrent_version_transition['storage_class'].upper()
+            rule['NoncurrentVersionTransitions'].append(t_out)
+
+    lifecycle_configuration = dict(Rules=[])
     appended = False
-    # If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule
-    if current_lifecycle_obj:
+    # If the bucket already has rules we have something to compare against, otherwise just add the rule
+    if current_lifecycle_rules:
         # If rule ID exists, use that for comparison otherwise compare based on prefix
-        for existing_rule in current_lifecycle_obj:
-            if rule.id == existing_rule.id:
-                if compare_rule(rule, existing_rule):
-                    lifecycle_obj.append(rule)
-                    appended = True
-                else:
-                    lifecycle_obj.append(rule)
-                    changed = True
-                    appended = True
-            elif rule.prefix == existing_rule.prefix:
-                existing_rule.id = None
-                if compare_rule(rule, existing_rule):
-                    lifecycle_obj.append(rule)
-                    appended = True
-                else:
-                    lifecycle_obj.append(rule)
-                    changed = True
-                    appended = True
+        for existing_rule in current_lifecycle_rules:
+            if rule['Filter']['Prefix'] == existing_rule['Filter']['Prefix']:
+                existing_rule.pop('ID', None)
+            if rule.get('ID') == existing_rule.get('ID'):
+                changed_, appended_ = update_or_append_rule(rule, existing_rule, purge_transitions, lifecycle_configuration)
+                changed = changed_ or changed
+                appended = appended_ or appended
             else:
-                lifecycle_obj.append(existing_rule)
+                lifecycle_configuration['Rules'].append(existing_rule)
         # If nothing appended then append now as the rule must not exist
         if not appended:
-            lifecycle_obj.append(rule)
+            lifecycle_configuration['Rules'].append(rule)
             changed = True
     else:
-        lifecycle_obj.append(rule)
+        lifecycle_configuration['Rules'].append(rule)
         changed = True

     # Write lifecycle to bucket
     try:
-        bucket.configure_lifecycle(lifecycle_obj)
-    except S3ResponseError as e:
-        module.fail_json(msg=e.message)
+        client.put_bucket_lifecycle_configuration(Bucket=name, LifecycleConfiguration=lifecycle_configuration)
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e)

     module.exit_json(changed=changed)


-def compare_rule(rule_a, rule_b):
+def update_or_append_rule(new_rule, existing_rule, purge_transitions, lifecycle_obj):
+    changed = False
+    if existing_rule['Status'] != new_rule['Status']:
+        if not new_rule.get('Transitions') and existing_rule.get('Transitions'):
+            new_rule['Transitions'] = existing_rule['Transitions']
+        if not new_rule.get('Expiration') and existing_rule.get('Expiration'):
+            new_rule['Expiration'] = existing_rule['Expiration']
+        if not new_rule.get('NoncurrentVersionExpiration') and existing_rule.get('NoncurrentVersionExpiration'):
+            new_rule['NoncurrentVersionExpiration'] = existing_rule['NoncurrentVersionExpiration']
+        lifecycle_obj['Rules'].append(new_rule)
+        changed = True
+        appended = True
+    else:
+        if not purge_transitions:
+            merge_transitions(new_rule, existing_rule)
+        if compare_rule(new_rule, existing_rule, purge_transitions):
+            lifecycle_obj['Rules'].append(new_rule)
+            appended = True
+        else:
+            lifecycle_obj['Rules'].append(new_rule)
+            changed = True
+            appended = True
+    return changed, appended

-    # Copy objects
-    rule1 = copy.deepcopy(rule_a)
-    rule2 = copy.deepcopy(rule_b)

-    # Delete Rule from Rule
-    try:
-        del rule1.Rule
-    except AttributeError:
-        pass
-    try:
-        del rule2.Rule
-    except AttributeError:
-        pass
+def compare_rule(rule_a, rule_b, purge_transitions):

-    # Extract Expiration and Transition objects
-    rule1_expiration = rule1.expiration
-    rule1_transition = rule1.transition
-    rule2_expiration = rule2.expiration
-    rule2_transition = rule2.transition
-
-    # Delete the Expiration and Transition objects from the Rule objects
-    del rule1.expiration
-    del rule1.transition
-    del rule2.expiration
-    del rule2.transition
-
-    # Compare
-    if rule1_transition is None:
-        rule1_transition = Transition()
-    if rule2_transition is None:
-        rule2_transition = Transition()
-    if rule1_expiration is None:
-        rule1_expiration = Expiration()
-    if rule2_expiration is None:
-        rule2_expiration = Expiration()
-
-    if (rule1.__dict__ == rule2.__dict__ and
-            rule1_expiration.__dict__ == rule2_expiration.__dict__ and
-            rule1_transition.__dict__ == rule2_transition.__dict__):
-        return True
-    else:
-        return False
+    # Copy objects
+    rule1 = deepcopy(rule_a)
+    rule2 = deepcopy(rule_b)
+
+    if purge_transitions:
+        return rule1 == rule2
+    else:
+        transitions1 = rule1.pop('Transitions', [])
+        transitions2 = rule2.pop('Transitions', [])
+        noncurrent_transitions1 = rule1.pop('NoncurrentVersionTransitions', [])
+        noncurrent_transitions2 = rule2.pop('NoncurrentVersionTransitions', [])
+        if rule1 != rule2:
+            return False
+        for transition in transitions1:
+            if transition not in transitions2:
+                return False
+        for transition in noncurrent_transitions1:
+            if transition not in noncurrent_transitions2:
+                return False
+        return True
+
+
+def merge_transitions(updated_rule, updating_rule):
+    # S3 allows at most one transition per storage class, so build dicts keyed
+    # on storage class and add to updated_rule the transitions that exist only
+    # in updating_rule.
+    updated_transitions = {}
+    updating_transitions = {}
+    for transition in updated_rule['Transitions']:
+        updated_transitions[transition['StorageClass']] = transition
+    for transition in updating_rule['Transitions']:
+        updating_transitions[transition['StorageClass']] = transition
+    for storage_class, transition in updating_transitions.items():
+        if updated_transitions.get(storage_class) is None:
+            updated_rule['Transitions'].append(transition)


-def destroy_lifecycle_rule(connection, module):
+def destroy_lifecycle_rule(client, module):

     name = module.params.get("name")
     prefix = module.params.get("prefix")
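For readers unfamiliar with the boto3 calls this port introduces, here is a minimal standalone equivalent of what the module now does on write (hypothetical bucket name; assumes AWS credentials are configured in the environment):

# Standalone sketch of the boto3 calls wrapped above (not module code).
import boto3

client = boto3.client('s3')
client.put_bucket_lifecycle_configuration(
    Bucket='mybucket',  # hypothetical bucket
    LifecycleConfiguration={
        'Rules': [{
            'Filter': {'Prefix': '/logs/'},
            'Status': 'Enabled',
            'Expiration': {'Days': 30},
            'NoncurrentVersionExpiration': {'NoncurrentDays': 300},
        }]
    },
)
# Reading back uses get_bucket_lifecycle_configuration, which raises a
# ClientError with code NoSuchLifecycleConfiguration when no rules exist.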
@ -299,108 +380,101 @@ def destroy_lifecycle_rule(connection, module):
if prefix is None: if prefix is None:
prefix = "" prefix = ""
try:
bucket = connection.get_bucket(name)
except S3ResponseError as e:
module.fail_json(msg=e.message)
# Get the bucket's current lifecycle rules # Get the bucket's current lifecycle rules
try: try:
current_lifecycle_obj = bucket.get_lifecycle_config() current_lifecycle_rules = client.get_bucket_lifecycle_configuration(Bucket=name)['Rules']
except S3ResponseError as e: except ClientError as e:
if e.error_code == "NoSuchLifecycleConfiguration": if e.response['Error']['Code'] == 'NoSuchLifecycleConfiguration':
module.exit_json(changed=changed) current_lifecycle_rules = []
else: else:
module.fail_json(msg=e.message) module.fail_json_aws(e)
except BotoCoreError as e:
module.fail_json_aws(e)
# Create lifecycle # Create lifecycle
lifecycle_obj = Lifecycle() lifecycle_obj = dict(Rules=[])
# Check if rule exists # Check if rule exists
# If an ID exists, use that otherwise compare based on prefix # If an ID exists, use that otherwise compare based on prefix
if rule_id is not None: if rule_id is not None:
for existing_rule in current_lifecycle_obj: for existing_rule in current_lifecycle_rules:
if rule_id == existing_rule.id: if rule_id == existing_rule['ID']:
# We're not keeping the rule (i.e. deleting) so mark as changed # We're not keeping the rule (i.e. deleting) so mark as changed
changed = True changed = True
else: else:
lifecycle_obj.append(existing_rule) lifecycle_obj['Rules'].append(existing_rule)
else: else:
for existing_rule in current_lifecycle_obj: for existing_rule in current_lifecycle_rules:
if prefix == existing_rule.prefix: if prefix == existing_rule['Filter']['Prefix']:
# We're not keeping the rule (i.e. deleting) so mark as changed # We're not keeping the rule (i.e. deleting) so mark as changed
changed = True changed = True
else: else:
lifecycle_obj.append(existing_rule) lifecycle_obj['Rules'].append(existing_rule)
# Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration
try: try:
if lifecycle_obj: if lifecycle_obj['Rules']:
bucket.configure_lifecycle(lifecycle_obj) client.put_bucket_lifecycle_configuration(Bucket=name, LifecycleConfiguration=lifecycle_obj)
else: else:
bucket.delete_lifecycle_configuration() client.delete_lifecycle_configuration(Bucket=name)
except BotoServerError as e: except (ClientError, BotoCoreError) as e:
module.fail_json(msg=e.message) module.fail_json_aws(e)
module.exit_json(changed=changed) module.exit_json(changed=changed)
def main(): def main():
argument_spec = dict(
argument_spec = ec2_argument_spec() name=dict(required=True, type='str'),
argument_spec.update( expiration_days=dict(type='int'),
dict( expiration_date=dict(),
name=dict(required=True, type='str'), noncurrent_version_expiration_days=dict(type='int'),
expiration_days=dict(default=None, required=False, type='int'), noncurrent_version_storage_class=dict(default='glacier', type='str', choices=['glacier', 'onezone_ia', 'standard_ia']),
expiration_date=dict(default=None, required=False, type='str'), noncurrent_version_transition_days=dict(type='int'),
prefix=dict(default=None, required=False), noncurrent_version_transitions=dict(type='list'),
requester_pays=dict(default='no', type='bool'), prefix=dict(),
rule_id=dict(required=False, type='str'), requester_pays=dict(default='no', type='bool'),
state=dict(default='present', choices=['present', 'absent']), rule_id=dict(),
status=dict(default='enabled', choices=['enabled', 'disabled']), state=dict(default='present', choices=['present', 'absent']),
storage_class=dict(default='glacier', type='str', choices=['glacier', 'standard_ia']), status=dict(default='enabled', choices=['enabled', 'disabled']),
transition_days=dict(default=None, required=False, type='int'), storage_class=dict(default='glacier', type='str', choices=['glacier', 'onezone_ia', 'standard_ia']),
transition_date=dict(default=None, required=False, type='str') transition_days=dict(type='int'),
) transition_date=dict(),
transitions=dict(type='list'),
purge_transitions=dict(default='yes', type='bool')
) )
module = AnsibleModule(argument_spec=argument_spec, module = AnsibleAWSModule(argument_spec=argument_spec,
mutually_exclusive=[ mutually_exclusive=[
['expiration_days', 'expiration_date'], ['expiration_days', 'expiration_date'],
['expiration_days', 'transition_date'], ['expiration_days', 'transition_date'],
['transition_days', 'transition_date'], ['transition_days', 'transition_date'],
['transition_days', 'expiration_date'] ['transition_days', 'expiration_date'],
] ['transition_days', 'transitions'],
) ['transition_date', 'transitions'],
['noncurrent_version_transition_days', 'noncurrent_version_transitions'],
if not HAS_BOTO: ],)
module.fail_json(msg='boto required for this module')
if not HAS_DATEUTIL: if not HAS_DATEUTIL:
module.fail_json(msg='dateutil required for this module') module.fail_json(msg='dateutil required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module) client = module.client('s3')
if region in ('us-east-1', '', None):
# S3ism for the US Standard region
location = Location.DEFAULT
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
try:
connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if connection is None:
connection = boto.connect_s3(**aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
expiration_date = module.params.get("expiration_date") expiration_date = module.params.get("expiration_date")
transition_date = module.params.get("transition_date") transition_date = module.params.get("transition_date")
state = module.params.get("state") state = module.params.get("state")
storage_class = module.params.get("storage_class")
if state == 'present' and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix
required_when_present = ('expiration_date', 'expiration_days', 'transition_date',
'transition_days', 'transitions', 'noncurrent_version_expiration_days',
'noncurrent_version_transition_days',
'noncurrent_version_transitions')
for param in required_when_present:
if module.params.get(param):
break
else:
msg = "one of the following is required when 'state' is 'present': %s" % ', '.join(required_when_present)
module.fail_json(msg=msg)
# If expiration_date set, check string is valid # If expiration_date set, check string is valid
if expiration_date is not None: if expiration_date is not None:
try: try:
@@ -414,14 +488,10 @@ def main():
         except ValueError as e:
             module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")

-    boto_required_version = (2, 40, 0)
-    if storage_class == 'standard_ia' and tuple(map(int, (boto.__version__.split(".")))) < boto_required_version:
-        module.fail_json(msg="'standard_ia' class requires boto >= 2.40.0")
-
     if state == 'present':
-        create_lifecycle_rule(connection, module)
+        create_lifecycle_rule(client, module)
     elif state == 'absent':
-        destroy_lifecycle_rule(connection, module)
+        destroy_lifecycle_rule(client, module)


 if __name__ == '__main__':

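One detail of main() worth calling out: the required-parameter check relies on Python's for/else construct, where the else branch runs only when the loop completes without hitting break. A tiny standalone demonstration with hypothetical params:

# for/else: the else clause runs only if the loop never hits `break`.
params = {'expiration_days': None, 'transition_days': 30}  # hypothetical
required_when_present = ('expiration_days', 'transition_days')

for param in required_when_present:
    if params.get(param):
        break  # at least one of the required options was supplied
else:
    raise SystemExit('one of the following is required: %s'
                     % ', '.join(required_when_present))
print('validation passed')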
@@ -0,0 +1,2 @@
+cloud/aws
+posix/ci/cloud/group4/aws

@@ -0,0 +1,436 @@
---
- block:

    # ============================================================
    - name: set connection information for all tasks
      set_fact:
        aws_connection_info: &aws_connection_info
          aws_access_key: "{{ aws_access_key }}"
          aws_secret_key: "{{ aws_secret_key }}"
          security_token: "{{ security_token }}"
          region: "{{ aws_region }}"
      no_log: true
    # ============================================================
    - name: Create simple s3_bucket
      s3_bucket:
        name: "{{ resource_prefix }}-testbucket-ansible"
        state: present
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output.changed
          - output.name == '{{ resource_prefix }}-testbucket-ansible'
          - not output.requester_pays
    # ============================================================
    - name: Create a lifecycle policy
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        expiration_days: 300
        prefix: /pre
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is changed
    # ============================================================
    - name: Create a lifecycle policy (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        expiration_days: 300
        prefix: /pre
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed
    # ============================================================
    - name: Create a second lifecycle policy
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        transition_days: 30
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is changed
    # ============================================================
    - name: Create a second lifecycle policy (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        transition_days: 30
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed
    # ============================================================
    - name: Disable the second lifecycle policy
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        status: disabled
        transition_days: 30
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is changed
    # ============================================================
    - name: Disable the second lifecycle policy (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        status: disabled
        transition_days: 30
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed
    # ============================================================
    - name: Re-enable the second lifecycle policy
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        status: enabled
        transition_days: 300
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is changed
    # ============================================================
    - name: Re-enable the second lifecycle policy (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        status: enabled
        transition_days: 300
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed
    # ============================================================
    - name: Delete the second lifecycle policy
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        state: absent
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is changed
    # ============================================================
    - name: Delete the second lifecycle policy (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        state: absent
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed
    # ============================================================
    - name: Create a second lifecycle policy, with infrequent access
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        transition_days: 30
        storage_class: standard_ia
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is changed
    # ============================================================
    - name: Create a second lifecycle policy, with infrequent access (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        storage_class: standard_ia
        transition_days: 30
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed
    # ============================================================
    - name: Create a second lifecycle policy, with glacier
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        transition_days: 300
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is changed
    # ============================================================
    - name: Create a second lifecycle policy, with glacier (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        transition_days: 300
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed
    # ============================================================
    - name: Create a lifecycle policy with infrequent access
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        transition_days: 30
        storage_class: standard_ia
        prefix: /something
        <<: *aws_connection_info
      register: output

    - name: Create a second lifecycle policy, with glacier
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        transition_days: 300
        prefix: /something
        purge_transitions: false
        <<: *aws_connection_info
      register: output

    - name: Create a lifecycle policy with infrequent access (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        storage_class: standard_ia
        transition_days: 30
        prefix: /something
        purge_transitions: false
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed

    - name: Create a second lifecycle policy, with glacier (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        transition_days: 300
        prefix: /something
        purge_transitions: false
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed
    # ============================================================
    - name: Create a lifecycle policy, with noncurrent expiration
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        noncurrent_version_expiration_days: 300
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is changed
    # ============================================================
    - name: Create a lifecycle policy, with noncurrent expiration (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        noncurrent_version_expiration_days: 300
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed
    # ============================================================
    - name: Create a lifecycle policy, with noncurrent transition
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        noncurrent_version_transition_days: 300
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is changed
    # ============================================================
    - name: Create a lifecycle policy, with noncurrent transition (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        noncurrent_version_transition_days: 300
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed
    # ============================================================
    - name: Create a lifecycle policy, with noncurrent transition to standard_ia
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        noncurrent_version_transition_days: 300
        noncurrent_version_storage_class: standard_ia
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is changed
    # ============================================================
    - name: Create a lifecycle policy, with noncurrent transition to standard_ia (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        noncurrent_version_storage_class: standard_ia
        noncurrent_version_transition_days: 300
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed
    # ============================================================
    - name: Create a lifecycle policy, with noncurrent transitions
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        noncurrent_version_transitions:
          - transition_days: 30
            storage_class: standard_ia
          - transition_days: 60
            storage_class: onezone_ia
          - transition_days: 90
            storage_class: glacier
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is changed
    # ============================================================
    - name: Create a lifecycle policy, with noncurrent transitions (idempotency)
      s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        noncurrent_version_transitions:
          - transition_days: 30
            storage_class: standard_ia
          - transition_days: 60
            storage_class: onezone_ia
          - transition_days: 90
            storage_class: glacier
        prefix: /something
        <<: *aws_connection_info
      register: output

    - assert:
        that:
          - output is not changed
    # ============================================================
    # test all the examples
    # Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
    - s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        expiration_days: 30
        prefix: /logs/
        status: enabled
        <<: *aws_connection_info
        state: present

    # Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
    - s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        transition_days: 7
        expiration_days: 90
        prefix: /logs/
        status: enabled
        <<: *aws_connection_info
        state: present

    # Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 31 Dec 2020 and then delete on 31 Dec 2030.
    # Note that midnight GMT must be specified.
    # Be sure to quote your date strings
    - s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        transition_date: "2020-12-30T00:00:00.000Z"
        expiration_date: "2030-12-30T00:00:00.000Z"
        prefix: /logs/
        status: enabled
        <<: *aws_connection_info
        state: present

    # Disable the rule created above
    - s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        prefix: /logs/
        status: disabled
        <<: *aws_connection_info
        state: present

    # Delete the lifecycle rule created above
    - s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        <<: *aws_connection_info
        prefix: /logs/
        state: absent

    # Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
    - s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        prefix: /backups/
        storage_class: standard_ia
        transition_days: 31
        state: present
        <<: *aws_connection_info
        status: enabled

    # Configure a lifecycle rule to transition files to infrequent access after 30 days and glacier after 90
    - s3_lifecycle:
        name: "{{ resource_prefix }}-testbucket-ansible"
        prefix: /other_logs/
        state: present
        <<: *aws_connection_info
        status: enabled
        transitions:
          - transition_days: 30
            storage_class: standard_ia
          - transition_days: 90
            storage_class: glacier
  # ============================================================
  always:
    - name: Ensure all buckets are deleted
      s3_bucket:
        name: "{{ item }}"
        state: absent
        <<: *aws_connection_info
      ignore_errors: yes
      with_items:
        - "{{ resource_prefix }}-testbucket-ansible"