sqs_queue - Move to boto3 and add support for various extra features (#66795)

* reworked sqs_queue

* Switch default purge_tags behaviour to false.

This matches the behaviour of ec2_tag and ecs_tag.
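To make the new default concrete, a minimal task sketch (queue name, region and tag values are placeholders, not part of this change): with purge_tags defaulting to false, tags already on the queue but not listed under tags are left alone, and setting purge_tags: true restores the old replace-everything behaviour.

- sqs_queue:
    name: my-queue            # placeholder queue name
    region: ap-southeast-2    # placeholder region
    tags:
      Env: prod               # placeholder tag
    purge_tags: true          # explicitly remove any tags not listed above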

* Minor lint / review fixups

* Add missing AWS IAM policy for SQS tests

* Move integration tests to using module_defaults: group/aws:...
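For readers unfamiliar with the pattern, the reworked test playbook (see the integration-test diff at the bottom of this commit) now supplies AWS credentials once via module_defaults instead of repeating an aws_connection_info anchor on every task; roughly:

- name: Main test block
  module_defaults:
    group/aws:
      aws_access_key: "{{ aws_access_key }}"
      aws_secret_key: "{{ aws_secret_key }}"
      security_token: "{{ security_token | default(omit) }}"
      region: "{{ aws_region }}"
  block:
    - sqs_queue:
        name: example-queue    # placeholder; the tests use resource_prefix-based names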

* add changelog

* Break out the 'compatibility' map from our spec definition (it gets flagged by the schema validation)

* Tweaks based on review

* add basic examples

* Lint fixups

* Switch out NonExistentQueue logic so it's easier to follow

* Reorder name argument options for consistency

Co-authored-by: Dennis Podkovyrin <dennis.podkovyrin@gmail.com>
Branch: pull/67841/head
Author: Mark Chappell (committed via GitHub)
Parent: 75b088d6a9
Commit: da30e6d2e1

@@ -0,0 +1,3 @@
+minor_changes:
+- 'sqs_queue: updated to use boto3 instead of boto'
+- 'sqs_queue: Add support for tagging, KMS and FIFO queues'
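As a rough illustration of the new features named above (queue name, key alias and tag value are placeholders; the module's EXAMPLES section further down shows the canonical usage):

- sqs_queue:
    name: example-queue               # placeholder
    queue_type: fifo
    content_based_deduplication: yes
    kms_master_key_id: alias/MyQueueKey
    kms_data_key_reuse_period_seconds: 3600
    tags:
      Env: prod                       # placeholder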

@@ -263,6 +263,18 @@
                 "lightsail:StopInstance"
             ],
             "Resource": "arn:aws:lightsail:*:*:*"
-        }
+        },
+        {
+            "Sid": "AllowSQS",
+            "Effect": "Allow",
+            "Action": [
+                "sqs:GetQueueURL",
+                "sqs:CreateQueue",
+                "sqs:GetQueueAttributes",
+                "sqs:DeleteQueue",
+                "sqs:SetQueueAttributes"
+            ],
+            "Resource": "arn:aws:sqs:*:*:*"
+        }
     ]
 }

@@ -23,13 +23,13 @@ author:
   - Alan Loi (@loia)
   - Fernando Jose Pando (@nand0p)
   - Nadir Lloret (@nadirollo)
+  - Dennis Podkovyrin (@sbj-ss)
 requirements:
-  - "boto >= 2.33.0"
+  - boto3
 options:
   state:
     description:
       - Create or delete the queue.
-    required: false
     choices: ['present', 'absent']
     default: 'present'
     type: str
@@ -38,9 +38,19 @@
       - Name of the queue.
     required: true
     type: str
-  default_visibility_timeout:
+  queue_type:
+    description:
+      - Standard or FIFO queue.
+      - I(queue_type) can only be set at queue creation and will otherwise be
+        ignored.
+    choices: ['standard', 'fifo']
+    default: 'standard'
+    version_added: "2.10"
+    type: str
+  visibility_timeout:
     description:
       - The default visibility timeout in seconds.
+    aliases: [default_visibility_timeout]
     type: int
   message_retention_period:
     description:
@@ -50,13 +60,15 @@
     description:
       - The maximum message size in bytes.
     type: int
-  delivery_delay:
+  delay_seconds:
     description:
       - The delivery delay in seconds.
+    aliases: [delivery_delay]
     type: int
-  receive_message_wait_time:
+  receive_message_wait_time_seconds:
     description:
       - The receive message wait time in seconds.
+    aliases: [receive_message_wait_time]
     type: int
   policy:
     description:
@@ -68,22 +80,65 @@
       - JSON dict with the redrive_policy (see example).
     version_added: "2.2"
     type: dict
+  kms_master_key_id:
+    description:
+      - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK.
+    version_added: "2.10"
+    type: str
+  kms_data_key_reuse_period_seconds:
+    description:
+      - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again.
+    aliases: [kms_data_key_reuse_period]
+    version_added: "2.10"
+    type: int
+  content_based_deduplication:
+    type: bool
+    description: Enables content-based deduplication. Used for FIFOs only.
+    version_added: "2.10"
+    default: false
+  tags:
+    description:
+      - Tag dict to apply to the queue (requires botocore 1.5.40 or above).
+      - To remove all tags set I(tags={}) and I(purge_tags=true).
+    version_added: "2.10"
+    type: dict
+  purge_tags:
+    description:
+      - Remove tags not listed in I(tags).
+    type: bool
+    default: false
+    version_added: "2.10"
 extends_documentation_fragment:
   - aws
   - ec2
 """

 RETURN = '''
-default_visibility_timeout:
+content_based_deduplication:
+    description: Enables content-based deduplication. Used for FIFOs only.
+    type: bool
+    returned: always
+    sample: True
+visibility_timeout:
     description: The default visibility timeout in seconds.
     type: int
     returned: always
     sample: 30
-delivery_delay:
+delay_seconds:
     description: The delivery delay in seconds.
     type: int
     returned: always
     sample: 0
+kms_master_key_id:
+    description: The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK.
+    type: str
+    returned: always
+    sample: alias/MyAlias
+kms_data_key_reuse_period_seconds:
+    description: The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again.
+    type: int
+    returned: always
+    sample: 300
 maximum_message_size:
     description: The maximum message size in bytes.
     type: int
@@ -102,9 +157,14 @@ name:
 queue_arn:
     description: The queue's Amazon resource name (ARN).
     type: str
-    returned: on successful creation or update of the queue
+    returned: on success
     sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0'
-receive_message_wait_time:
+queue_url:
+    description: URL to access the queue
+    type: str
+    returned: on success
+    sample: 'https://queue.amazonaws.com/123456789012/MyQueue'
+receive_message_wait_time_seconds:
     description: The receive message wait time in seconds.
     type: int
     returned: always
@@ -114,6 +174,11 @@ region:
     type: str
     returned: always
     sample: 'us-east-1'
+tags:
+    description: List of queue tags
+    type: dict
+    returned: always
+    sample: '{"Env": "prod"}'
 '''

 EXAMPLES = '''
@@ -131,6 +196,33 @@
       maxReceiveCount: 5
       deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue

+# Drop redrive policy
+- sqs_queue:
+    name: my-queue
+    region: ap-southeast-2
+    redrive_policy: {}
+
+# Create FIFO queue
+- sqs_queue:
+    name: fifo-queue
+    region: ap-southeast-2
+    queue_type: fifo
+    content_based_deduplication: yes
+
+# Tag queue
+- sqs_queue:
+    name: fifo-queue
+    region: ap-southeast-2
+    tags:
+      example: SomeValue
+
+# Configure Encryption, automatically uses a new data key every hour
+- sqs_queue:
+    name: fifo-queue
+    region: ap-southeast-2
+    kms_master_key_id: alias/MyQueueKey
+    kms_data_key_reuse_period_seconds: 3600
+
 # Delete SQS queue
 - sqs_queue:
     name: my-queue
@@ -139,178 +231,250 @@
 '''

 import json
-import traceback
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags, snake_dict_to_camel_dict, compare_policies

 try:
-    import boto.sqs
-    from boto.exception import BotoServerError, NoAuthHandlerFound
-    HAS_BOTO = True
+    from botocore.exceptions import BotoCoreError, ClientError, ParamValidationError
 except ImportError:
-    HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
-
-
-def create_or_update_sqs_queue(connection, module):
-    queue_name = module.params.get('name')
-
-    queue_attributes = dict(
-        default_visibility_timeout=module.params.get('default_visibility_timeout'),
-        message_retention_period=module.params.get('message_retention_period'),
-        maximum_message_size=module.params.get('maximum_message_size'),
-        delivery_delay=module.params.get('delivery_delay'),
-        receive_message_wait_time=module.params.get('receive_message_wait_time'),
-        policy=module.params.get('policy'),
-        redrive_policy=module.params.get('redrive_policy')
-    )
-
+    pass  # handled by AnsibleAWSModule
+
+
+def get_queue_name(module, is_fifo=False):
+    name = module.params.get('name')
+    if not is_fifo or name.endswith('.fifo'):
+        return name
+    return name + '.fifo'
+
+
+# NonExistentQueue is explicitly expected when a queue doesn't exist
+@AWSRetry.jittered_backoff()
+def get_queue_url(client, name):
+    try:
+        return client.get_queue_url(QueueName=name)['QueueUrl']
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'AWS.SimpleQueueService.NonExistentQueue':
+            return None
+        raise
+
+
+def describe_queue(client, queue_url):
+    """
+    Description a queue in snake format
+    """
+    attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes']
+    description = dict(attributes)
+    description.pop('Policy', None)
+    description.pop('RedrivePolicy', None)
+    description = camel_dict_to_snake_dict(description)
+    description['policy'] = attributes.get('Policy', None)
+    description['redrive_policy'] = attributes.get('RedrivePolicy', None)
+
+    # Boto3 returns everything as a string, convert them back to integers/dicts if
+    # that's what we expected.
+    for key, value in description.items():
+        if value is None:
+            continue
+
+        if key in ['policy', 'redrive_policy']:
+            policy = json.loads(value)
+            description[key] = policy
+            continue
+
+        if key == 'content_based_deduplication':
+            try:
+                description[key] = bool(value)
+            except (TypeError, ValueError):
+                pass
+
+        try:
+            if value == str(int(value)):
+                description[key] = int(value)
+        except (TypeError, ValueError):
+            pass
+
+    return description
+
+
+def create_or_update_sqs_queue(client, module):
+    is_fifo = (module.params.get('queue_type') == 'fifo')
+    queue_name = get_queue_name(module, is_fifo)
     result = dict(
-        region=module.params.get('region'),
         name=queue_name,
+        region=module.params.get('region'),
+        changed=False,
     )
-    result.update(queue_attributes)
-
-    try:
-        queue = connection.get_queue(queue_name)
-        if queue:
-            # Update existing
-            result['changed'] = update_sqs_queue(queue, check_mode=module.check_mode, **queue_attributes)
-        else:
-            # Create new
-            if not module.check_mode:
-                queue = connection.create_queue(queue_name)
-                update_sqs_queue(queue, **queue_attributes)
-            result['changed'] = True
-
-        if not module.check_mode:
-            result['queue_arn'] = queue.get_attributes('QueueArn')['QueueArn']
-            result['default_visibility_timeout'] = queue.get_attributes('VisibilityTimeout')['VisibilityTimeout']
-            result['message_retention_period'] = queue.get_attributes('MessageRetentionPeriod')['MessageRetentionPeriod']
-            result['maximum_message_size'] = queue.get_attributes('MaximumMessageSize')['MaximumMessageSize']
-            result['delivery_delay'] = queue.get_attributes('DelaySeconds')['DelaySeconds']
-            result['receive_message_wait_time'] = queue.get_attributes('ReceiveMessageWaitTimeSeconds')['ReceiveMessageWaitTimeSeconds']
-
-    except BotoServerError:
-        result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc()
-        module.fail_json(**result)
-    else:
-        module.exit_json(**result)
-
-
-def update_sqs_queue(queue,
-                     check_mode=False,
-                     default_visibility_timeout=None,
-                     message_retention_period=None,
-                     maximum_message_size=None,
-                     delivery_delay=None,
-                     receive_message_wait_time=None,
-                     policy=None,
-                     redrive_policy=None):
-    changed = False
-
-    changed = set_queue_attribute(queue, 'VisibilityTimeout', default_visibility_timeout,
-                                  check_mode=check_mode) or changed
-    changed = set_queue_attribute(queue, 'MessageRetentionPeriod', message_retention_period,
-                                  check_mode=check_mode) or changed
-    changed = set_queue_attribute(queue, 'MaximumMessageSize', maximum_message_size,
-                                  check_mode=check_mode) or changed
-    changed = set_queue_attribute(queue, 'DelaySeconds', delivery_delay,
-                                  check_mode=check_mode) or changed
-    changed = set_queue_attribute(queue, 'ReceiveMessageWaitTimeSeconds', receive_message_wait_time,
-                                  check_mode=check_mode) or changed
-    changed = set_queue_attribute(queue, 'Policy', policy,
-                                  check_mode=check_mode) or changed
-    changed = set_queue_attribute(queue, 'RedrivePolicy', redrive_policy,
-                                  check_mode=check_mode) or changed
-    return changed
-
-
-def set_queue_attribute(queue, attribute, value, check_mode=False):
-    if not value and value != 0:
-        return False
-
-    try:
-        existing_value = queue.get_attributes(attributes=attribute)[attribute]
-    except Exception:
-        existing_value = ''
-
-    # convert dict attributes to JSON strings (sort keys for comparing)
-    if attribute in ['Policy', 'RedrivePolicy']:
-        value = json.dumps(value, sort_keys=True)
-        if existing_value:
-            existing_value = json.dumps(json.loads(existing_value), sort_keys=True)
-
-    if str(value) != existing_value:
-        if not check_mode:
-            queue.set_attribute(attribute, value)
-        return True
-
-    return False
-
-
-def delete_sqs_queue(connection, module):
-    queue_name = module.params.get('name')
-
+
+    queue_url = get_queue_url(client, queue_name)
+    result['queue_url'] = queue_url
+
+    if not queue_url:
+        create_attributes = {'FifoQueue': 'true'} if is_fifo else {}
+        result['changed'] = True
+        if module.check_mode:
+            return result
+        queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)['QueueUrl']
+
+    changed, arn = update_sqs_queue(module, client, queue_url)
+    result['changed'] |= changed
+    result['queue_arn'] = arn
+
+    changed, tags = update_tags(client, queue_url, module)
+    result['changed'] |= changed
+    result['tags'] = tags
+
+    result.update(describe_queue(client, queue_url))
+
+    COMPATABILITY_KEYS = dict(
+        delay_seconds='delivery_delay',
+        receive_message_wait_time_seconds='receive_message_wait_time',
+        visibility_timeout='default_visibility_timeout',
+        kms_data_key_reuse_period_seconds='kms_data_key_reuse_period',
+    )
+    for key in list(result.keys()):
+
+        # The return values changed between boto and boto3, add the old keys too
+        # for backwards compatibility
+        return_name = COMPATABILITY_KEYS.get(key)
+        if return_name:
+            result[return_name] = result.get(key)
+
+    return result
+
+
+def update_sqs_queue(module, client, queue_url):
+    check_mode = module.check_mode
+    changed = False
+    existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes']
+    new_attributes = snake_dict_to_camel_dict(module.params, capitalize_first=True)
+    attributes_to_set = dict()
+
+    # Boto3 SQS deals with policies as strings, we want to deal with them as
+    # dicts
+    if module.params.get('policy') is not None:
+        policy = module.params.get('policy')
+        current_value = existing_attributes.get('Policy', '{}')
+        current_policy = json.loads(current_value)
+        if compare_policies(current_policy, policy):
+            attributes_to_set['Policy'] = json.dumps(policy)
+            changed = True
+    if module.params.get('redrive_policy') is not None:
+        policy = module.params.get('redrive_policy')
+        current_value = existing_attributes.get('RedrivePolicy', '{}')
+        current_policy = json.loads(current_value)
+        if compare_policies(current_policy, policy):
+            attributes_to_set['RedrivePolicy'] = json.dumps(policy)
+            changed = True
+
+    for attribute, value in existing_attributes.items():
+        # We handle these as a special case because they're IAM policies
+        if attribute in ['Policy', 'RedrivePolicy']:
+            continue
+
+        if attribute not in new_attributes.keys():
+            continue
+
+        if new_attributes.get(attribute) is None:
+            continue
+
+        new_value = new_attributes[attribute]
+
+        if isinstance(new_value, bool):
+            new_value = str(new_value).lower()
+            existing_value = str(existing_value).lower()
+
+        if new_value == value:
+            continue
+
+        # Boto3 expects strings
+        attributes_to_set[attribute] = str(new_value)
+        changed = True
+
+    if changed and not check_mode:
+        client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes_to_set, aws_retry=True)
+
+    return changed, existing_attributes.get('queue_arn'),
+
+
+def delete_sqs_queue(client, module):
+    is_fifo = (module.params.get('queue_type') == 'fifo')
+    queue_name = get_queue_name(module, is_fifo)
     result = dict(
-        region=module.params.get('region'),
         name=queue_name,
+        region=module.params.get('region'),
+        changed=False
     )
-
-    try:
-        queue = connection.get_queue(queue_name)
-        if queue:
-            if not module.check_mode:
-                connection.delete_queue(queue)
-            result['changed'] = True
-        else:
-            result['changed'] = False
-
-    except BotoServerError:
-        result['msg'] = 'Failed to delete sqs queue due to error: ' + traceback.format_exc()
-        module.fail_json(**result)
-    else:
-        module.exit_json(**result)
-
-
-def main():
-    argument_spec = ec2_argument_spec()
-    argument_spec.update(dict(
-        state=dict(default='present', choices=['present', 'absent']),
-        name=dict(required=True, type='str'),
-        default_visibility_timeout=dict(type='int'),
-        message_retention_period=dict(type='int'),
-        maximum_message_size=dict(type='int'),
-        delivery_delay=dict(type='int'),
-        receive_message_wait_time=dict(type='int'),
-        policy=dict(type='dict', required=False),
-        redrive_policy=dict(type='dict', required=False),
-    ))
-
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-        supports_check_mode=True)
-
-    if not HAS_BOTO:
-        module.fail_json(msg='boto required for this module')
-
-    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-    if not region:
-        module.fail_json(msg='region must be specified')
-
-    try:
-        connection = connect_to_aws(boto.sqs, region, **aws_connect_params)
-    except (NoAuthHandlerFound, AnsibleAWSError) as e:
-        module.fail_json(msg=str(e))
-
+
+    queue_url = get_queue_url(client, queue_name)
+    if not queue_url:
+        return result
+
+    result['changed'] = bool(queue_url)
+    if not module.check_mode:
+        AWSRetry.jittered_backoff()(client.delete_queue)(QueueUrl=queue_url)
+
+    return result
+
+
+def update_tags(client, queue_url, module):
+    new_tags = module.params.get('tags')
+    purge_tags = module.params.get('purge_tags')
+    if new_tags is None:
+        return False, {}
+
+    try:
+        existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)['Tags']
+    except (ClientError, KeyError) as e:
+        existing_tags = {}
+
+    tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+
+    if not module.check_mode:
+        if tags_to_remove:
+            client.untag_queue(QueueUrl=queue_url, TagKeys=tags_to_remove, aws_retry=True)
+        if tags_to_add:
+            client.tag_queue(QueueUrl=queue_url, Tags=tags_to_add)
+        existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get('Tags', {})
+    else:
+        existing_tags = new_tags
+
+    changed = bool(tags_to_remove) or bool(tags_to_add)
+    return changed, existing_tags
+
+
+def main():
+
+    argument_spec = dict(
+        state=dict(type='str', default='present', choices=['present', 'absent']),
+        name=dict(type='str', required=True),
+        queue_type=dict(type='str', default='standard', choices=['standard', 'fifo']),
+        delay_seconds=dict(type='int', aliases=['delivery_delay']),
+        maximum_message_size=dict(type='int'),
+        message_retention_period=dict(type='int'),
+        policy=dict(type='dict'),
+        receive_message_wait_time_seconds=dict(type='int', aliases=['receive_message_wait_time']),
+        redrive_policy=dict(type='dict'),
+        visibility_timeout=dict(type='int', aliases=['default_visibility_timeout']),
+        kms_master_key_id=dict(type='str'),
+        kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period']),
+        content_based_deduplication=dict(type='bool'),
+        tags=dict(type='dict'),
+        purge_tags=dict(type='bool', default=False),
+    )
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
     state = module.params.get('state')
-    if state == 'present':
-        create_or_update_sqs_queue(connection, module)
-    elif state == 'absent':
-        delete_sqs_queue(connection, module)
+    retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=['AWS.SimpleQueueService.NonExistentQueue'])
+    try:
+        client = module.client('sqs', retry_decorator=retry_decorator)
+        if state == 'present':
+            result = create_or_update_sqs_queue(client, module)
+        elif state == 'absent':
+            result = delete_sqs_queue(client, module)
+    except (BotoCoreError, ClientError, ParamValidationError) as e:
+        module.fail_json_aws(e, msg='Failed to control sqs queue')
+    else:
+        module.exit_json(**result)


 if __name__ == '__main__':

@@ -1,17 +1,16 @@
 ---
-- name: set up aws connection info
-  set_fact:
-    aws_connection_info: &aws_connection_info
+- name: Main test block
+  module_defaults:
+    group/aws:
       aws_access_key: "{{ aws_access_key }}"
       aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token }}"
+      security_token: "{{ security_token | default(omit) }}"
       region: "{{ aws_region }}"
-  no_log: yes
-- block:
+  block:
+  - block:
     - name: Test creating SQS queue
       sqs_queue:
         name: "{{ resource_prefix }}{{ 1000 | random }}"
-        <<: *aws_connection_info
      register: create_result
    - name: Assert SQS queue created
      assert:
@@ -23,7 +22,6 @@
       sqs_queue:
         name: "{{ create_result.name }}"
         state: absent
-        <<: *aws_connection_info
       register: delete_result
       retries: 3
       delay: 3
@@ -36,13 +34,12 @@
       sqs_queue:
         name: "{{ resource_prefix }}{{ 1000 | random }}"
         state: absent
-        <<: *aws_connection_info
       register: delete_result
     - name: Assert delete non-existant queue returns cleanly
       assert:
         that:
           - delete_result.changed == False
     - name: Test queue features
       block:
       - name: Test create queue with attributes
         sqs_queue:
@@ -57,36 +54,33 @@
             Statement:
               Effect: Allow
               Action: "*"
-          <<: *aws_connection_info
         register: create_result
       - name: Assert queue created with configuration
         assert:
           that:
             - create_result.changed
-            - create_result.default_visibility_timeout == "900"
-            - create_result.delivery_delay == "900"
-            - create_result.maximum_message_size == "9009"
-            - create_result.message_retention_period == "900"
-            - create_result.receive_message_wait_time == "10"
+            - create_result.default_visibility_timeout == 900
+            - create_result.delivery_delay == 900
+            - create_result.maximum_message_size == 9009
+            - create_result.message_retention_period == 900
+            - create_result.receive_message_wait_time == 10
             - create_result.policy.Version == "2012-10-17"
-            - create_result.policy.Statement.Effect == "Allow"
-            - create_result.policy.Statement.Action == "*"
+            - create_result.policy.Statement[0].Effect == "Allow"
+            - create_result.policy.Statement[0].Action == "*"
       always:
       - name: Cleaning up queue
         sqs_queue:
           name: "{{ create_result.name }}"
           state: absent
-          <<: *aws_connection_info
         register: delete_result
         retries: 3
         delay: 3
         until: delete_result.changed
     - name: Test queue with redrive
       block:
       - name: Creating dead letter queue
         sqs_queue:
           name: "{{ resource_prefix }}{{ 1000 | random }}"
-          <<: *aws_connection_info
         register: dead_letter_queue
       - name: Test create queue with redrive_policy
         sqs_queue:
@@ -94,7 +88,6 @@
           redrive_policy:
             maxReceiveCount: 5
             deadLetterTargetArn: "{{ dead_letter_queue.queue_arn }}"
-          <<: *aws_connection_info
         register: create_result
       - name: Assert queue created with configuration
         assert:
@@ -105,7 +98,6 @@
         sqs_queue:
           name: "{{ item.name }}"
           state: absent
-          <<: *aws_connection_info
         register: delete_result
         retries: 3
         delay: 3
