mirror of https://github.com/ansible/ansible.git
Removed unused test/support modules (#76048)
parent 817e1045bb
commit 80f4b67f43
@@ -1,925 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}


DOCUMENTATION = '''
---
module: aws_s3
short_description: manage objects in S3.
description:
    - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and
      deleting both objects and buckets, retrieving objects as files or strings and generating download links.
      This module has a dependency on boto3 and botocore.
notes:
    - In 2.4, this module has been renamed from C(s3) into M(aws_s3).
version_added: "1.1"
options:
  bucket:
    description:
      - Bucket name.
    required: true
    type: str
  dest:
    description:
      - The destination file path when downloading an object/key with a GET operation.
    version_added: "1.3"
    type: path
  encrypt:
    description:
      - When set for PUT mode, asks for server-side encryption.
    default: true
    version_added: "2.0"
    type: bool
  encryption_mode:
    description:
      - What encryption mode to use if I(encrypt=true).
    default: AES256
    choices:
      - AES256
      - aws:kms
    version_added: "2.7"
    type: str
  expiry:
    description:
      - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation.
    default: 600
    aliases: ['expiration']
    type: int
  headers:
    description:
      - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
    version_added: "2.0"
    type: dict
  marker:
    description:
      - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order.
    version_added: "2.0"
    type: str
  max_keys:
    description:
      - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
    default: 1000
    version_added: "2.0"
    type: int
  metadata:
    description:
      - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
    version_added: "1.6"
    type: dict
  mode:
    description:
      - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+),
        getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket),
        and delobj (delete object, Ansible 2.0+).
    required: true
    choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
    type: str
  object:
    description:
      - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
    type: str
  permission:
    description:
      - This option lets the user set the canned permissions on the object/bucket that are created.
        The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or
        C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read),
        C(bucket-owner-full-control) for an object. Multiple permissions can be specified as a list.
    default: ['private']
    version_added: "2.0"
    type: list
    elements: str
  prefix:
    description:
      - Limits the response to keys that begin with the specified prefix for list mode.
    default: ""
    version_added: "2.0"
    type: str
  version:
    description:
      - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
    version_added: "2.0"
    type: str
  overwrite:
    description:
      - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
        Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0.
        When this is set to 'different', the md5 sum of the local file is compared with the 'ETag' of the object/key in S3.
        The ETag may or may not be an MD5 digest of the object data. See the ETag response header here
        U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html)
    default: 'always'
    aliases: ['force']
    version_added: "1.2"
    type: str
  retries:
    description:
      - On recoverable failure, how many times to retry before actually failing.
    default: 0
    version_added: "2.0"
    type: int
    aliases: ['retry']
  s3_url:
    description:
      - S3 URL endpoint for usage with Ceph, Eucalyptus and fakes3 etc. Otherwise assumes AWS.
    aliases: [ S3_URL ]
    type: str
  dualstack:
    description:
      - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
      - Requires at least botocore version 1.4.45.
    type: bool
    default: false
    version_added: "2.7"
  rgw:
    description:
      - Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url).
    default: false
    version_added: "2.2"
    type: bool
  src:
    description:
      - The source file path when performing a PUT operation.
    version_added: "1.3"
    type: str
  ignore_nonexistent_bucket:
    description:
      - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the
        GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying
        I(ignore_nonexistent_bucket=true)."
    version_added: "2.3"
    type: bool
  encryption_kms_key_id:
    description:
      - KMS key id to use when encrypting objects using I(encryption_mode=aws:kms). Ignored if I(encryption_mode) is not C(aws:kms)
    version_added: "2.7"
    type: str
requirements: [ "boto3", "botocore" ]
author:
    - "Lester Wade (@lwade)"
    - "Sloane Hertel (@s-hertel)"
extends_documentation_fragment:
    - aws
    - ec2
'''

EXAMPLES = '''
- name: Simple PUT operation
  aws_s3:
    bucket: mybucket
    object: /my/desired/key.txt
    src: /usr/local/myfile.txt
    mode: put

- name: Simple PUT operation in Ceph RGW S3
  aws_s3:
    bucket: mybucket
    object: /my/desired/key.txt
    src: /usr/local/myfile.txt
    mode: put
    rgw: true
    s3_url: "http://localhost:8000"

- name: Simple GET operation
  aws_s3:
    bucket: mybucket
    object: /my/desired/key.txt
    dest: /usr/local/myfile.txt
    mode: get

- name: Get a specific version of an object.
  aws_s3:
    bucket: mybucket
    object: /my/desired/key.txt
    version: 48c9ee5131af7a716edc22df9772aa6f
    dest: /usr/local/myfile.txt
    mode: get

- name: PUT/upload with metadata
  aws_s3:
    bucket: mybucket
    object: /my/desired/key.txt
    src: /usr/local/myfile.txt
    mode: put
    metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'

- name: PUT/upload with custom headers
  aws_s3:
    bucket: mybucket
    object: /my/desired/key.txt
    src: /usr/local/myfile.txt
    mode: put
    headers: 'x-amz-grant-full-control=emailAddress=owner@example.com'

- name: List keys simple
  aws_s3:
    bucket: mybucket
    mode: list

- name: List keys all options
  aws_s3:
    bucket: mybucket
    mode: list
    prefix: /my/desired/
    marker: /my/desired/0023.txt
    max_keys: 472

- name: Create an empty bucket
  aws_s3:
    bucket: mybucket
    mode: create
    permission: public-read

- name: Create a bucket with key as directory, in the EU region
  aws_s3:
    bucket: mybucket
    object: /my/directory/path
    mode: create
    region: eu-west-1

- name: Delete a bucket and all contents
  aws_s3:
    bucket: mybucket
    mode: delete

- name: GET an object but don't download if the file checksums match. New in 2.0
  aws_s3:
    bucket: mybucket
    object: /my/desired/key.txt
    dest: /usr/local/myfile.txt
    mode: get
    overwrite: different

- name: Delete an object from a bucket
  aws_s3:
    bucket: mybucket
    object: /my/desired/key.txt
    mode: delobj
'''

RETURN = '''
msg:
  description: Message indicating the status of the operation.
  returned: always
  type: str
  sample: PUT operation complete
url:
  description: URL of the object.
  returned: (for put and geturl operations)
  type: str
  sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=<access-key>&Expires=1506888865&Signature=<signature>
expiry:
  description: Number of seconds the presigned url is valid for.
  returned: (for geturl operation)
  type: int
  sample: 600
contents:
  description: Contents of the object as string.
  returned: (for getstr operation)
  type: str
  sample: "Hello, world!"
s3_keys:
  description: List of object keys.
  returned: (for list operation)
  type: list
  elements: str
  sample:
  - prefix1/
  - prefix1/key1
  - prefix1/key2
'''

import mimetypes
import os
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ssl import SSLError
from ansible.module_utils.basic import to_text, to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.s3 import calculate_etag, HAS_MD5
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn

try:
    import botocore
except ImportError:
    pass  # will be detected by imported AnsibleAWSModule

IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented']


class Sigv4Required(Exception):
    pass


def key_check(module, s3, bucket, obj, version=None, validate=True):
    exists = True
    try:
        if version:
            s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
        else:
            s3.head_object(Bucket=bucket, Key=obj)
    except botocore.exceptions.ClientError as e:
        # if a client error is thrown, check if it's a 404 error
        # if it's a 404 error, then the object does not exist
        error_code = int(e.response['Error']['Code'])
        if error_code == 404:
            exists = False
        elif error_code == 403 and validate is False:
            pass
        else:
            module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
    return exists


def etag_compare(module, local_file, s3, bucket, obj, version=None):
    s3_etag = get_etag(s3, bucket, obj, version=version)
    local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version)

    return s3_etag == local_etag


def get_etag(s3, bucket, obj, version=None):
    if version:
        key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
    else:
        key_check = s3.head_object(Bucket=bucket, Key=obj)
    if not key_check:
        return None
    return key_check['ETag']


def bucket_check(module, s3, bucket, validate=True):
    exists = True
    try:
        s3.head_bucket(Bucket=bucket)
    except botocore.exceptions.ClientError as e:
        # If a client error is thrown, then check that it was a 404 error.
        # If it was a 404 error, then the bucket does not exist.
        error_code = int(e.response['Error']['Code'])
        if error_code == 404:
            exists = False
        elif error_code == 403 and validate is False:
            pass
        else:
            module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
    except botocore.exceptions.EndpointConnectionError as e:
        module.fail_json_aws(e, msg="Invalid endpoint provided")
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
    return exists


def create_bucket(module, s3, bucket, location=None):
    if module.check_mode:
        module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True)
    configuration = {}
    if location not in ('us-east-1', None):
        configuration['LocationConstraint'] = location
    try:
        if len(configuration) > 0:
            s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration)
        else:
            s3.create_bucket(Bucket=bucket)
        if module.params.get('permission'):
            # Wait for the bucket to exist before setting ACLs
            s3.get_waiter('bucket_exists').wait(Bucket=bucket)
        for acl in module.params.get('permission'):
            s3.put_bucket_acl(ACL=acl, Bucket=bucket)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
            module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
        else:
            module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")

    if bucket:
        return True


def paginated_list(s3, **pagination_params):
    pg = s3.get_paginator('list_objects_v2')
    for page in pg.paginate(**pagination_params):
        yield [data['Key'] for data in page.get('Contents', [])]


def paginated_versioned_list_with_fallback(s3, **pagination_params):
    try:
        versioned_pg = s3.get_paginator('list_object_versions')
        for page in versioned_pg.paginate(**pagination_params):
            delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])]
            current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])]
            yield delete_markers + current_objects
    except botocore.exceptions.ClientError as e:
        if to_text(e.response['Error']['Code']) in IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']:
            for page in paginated_list(s3, **pagination_params):
                yield [{'Key': data['Key']} for data in page]


def list_keys(module, s3, bucket, prefix, marker, max_keys):
    pagination_params = {'Bucket': bucket}
    for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)):
        pagination_params[param_name] = param_value
    try:
        keys = sum(paginated_list(s3, **pagination_params), [])
        module.exit_json(msg="LIST operation complete", s3_keys=keys)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket))


def delete_bucket(module, s3, bucket):
    if module.check_mode:
        module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
    try:
        exists = bucket_check(module, s3, bucket)
        if exists is False:
            return False
        # if there are contents then we need to delete them before we can delete the bucket
        for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket):
            if keys:
                s3.delete_objects(Bucket=bucket, Delete={'Objects': keys})
        s3.delete_bucket(Bucket=bucket)
        return True
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket)


def delete_key(module, s3, bucket, obj):
    if module.check_mode:
        module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
    try:
        s3.delete_object(Bucket=bucket, Key=obj)
        module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj)


def create_dirkey(module, s3, bucket, obj, encrypt):
    if module.check_mode:
        module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
    try:
        params = {'Bucket': bucket, 'Key': obj, 'Body': b''}
        if encrypt:
            params['ServerSideEncryption'] = module.params['encryption_mode']
        if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
            params['SSEKMSKeyId'] = module.params['encryption_kms_key_id']

        s3.put_object(**params)
        for acl in module.params.get('permission'):
            s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
            module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning")
        else:
            module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
    module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), changed=True)


def path_check(path):
    if os.path.exists(path):
        return True
    else:
        return False


def option_in_extra_args(option):
    temp_option = option.replace('-', '').lower()

    allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition',
                          'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage',
                          'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl',
                          'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP',
                          'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption',
                          'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey',
                          'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'}

    if temp_option in allowed_extra_args:
        return allowed_extra_args[temp_option]


def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
    if module.check_mode:
        module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
    try:
        extra = {}
        if encrypt:
            extra['ServerSideEncryption'] = module.params['encryption_mode']
        if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
            extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
        if metadata:
            extra['Metadata'] = {}

            # determine object metadata and extra arguments
            for option in metadata:
                extra_args_option = option_in_extra_args(option)
                if extra_args_option is not None:
                    extra[extra_args_option] = metadata[option]
                else:
                    extra['Metadata'][option] = metadata[option]

        if 'ContentType' not in extra:
            content_type = mimetypes.guess_type(src)[0]
            if content_type is None:
                # s3 default content type
                content_type = 'binary/octet-stream'
            extra['ContentType'] = content_type

        s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to complete PUT operation.")
    try:
        for acl in module.params.get('permission'):
            s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
            module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
        else:
            module.fail_json_aws(e, msg="Unable to set object ACL")
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Unable to set object ACL")
    try:
        url = s3.generate_presigned_url(ClientMethod='put_object',
                                        Params={'Bucket': bucket, 'Key': obj},
                                        ExpiresIn=expiry)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to generate presigned URL")
    module.exit_json(msg="PUT operation complete", url=url, changed=True)


def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
    if module.check_mode:
        module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
    # retries is the number of loops; range/xrange needs to be one
    # more to get that count of loops.
    try:
        if version:
            key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
        else:
            key = s3.get_object(Bucket=bucket, Key=obj)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
            raise Sigv4Required()
        elif e.response['Error']['Code'] not in ("403", "404"):
            # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
            # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
            module.fail_json_aws(e, msg="Could not find the key %s." % obj)
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Could not find the key %s." % obj)

    optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {}
    for x in range(0, retries + 1):
        try:
            s3.download_file(bucket, obj, dest, **optional_kwargs)
            module.exit_json(msg="GET operation complete", changed=True)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            # actually fail on last pass through the loop.
            if x >= retries:
                module.fail_json_aws(e, msg="Failed while downloading %s." % obj)
            # otherwise, try again, this may be a transient timeout.
        except SSLError as e:  # will ClientError catch SSLError?
            # actually fail on last pass through the loop.
            if x >= retries:
                module.fail_json_aws(e, msg="s3 download failed")
            # otherwise, try again, this may be a transient timeout.


def download_s3str(module, s3, bucket, obj, version=None, validate=True):
    if module.check_mode:
        module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
    try:
        if version:
            contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read())
        else:
            contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read())
        module.exit_json(msg="GET operation complete", contents=contents, changed=True)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
            raise Sigv4Required()
        else:
            module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)


def get_download_url(module, s3, bucket, obj, expiry, changed=True):
    try:
        url = s3.generate_presigned_url(ClientMethod='get_object',
                                        Params={'Bucket': bucket, 'Key': obj},
                                        ExpiresIn=expiry)
        module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed while getting download url.")


def is_fakes3(s3_url):
    """ Return True if s3_url has scheme fakes3:// """
    if s3_url is not None:
        return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
    else:
        return False


def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False):
    if s3_url and rgw:  # TODO - test this
        rgw = urlparse(s3_url)
        params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
    elif is_fakes3(s3_url):
        fakes3 = urlparse(s3_url)
        port = fakes3.port
        if fakes3.scheme == 'fakes3s':
            protocol = "https"
            if port is None:
                port = 443
        else:
            protocol = "http"
            if port is None:
                port = 80
        params = dict(module=module, conn_type='client', resource='s3', region=location,
                      endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
                      use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
    else:
        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
        if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms':
            params['config'] = botocore.client.Config(signature_version='s3v4')
        elif module.params['mode'] in ('get', 'getstr') and sig_4:
            params['config'] = botocore.client.Config(signature_version='s3v4')
        if module.params['dualstack']:
            dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
            if 'config' in params:
                params['config'] = params['config'].merge(dualconf)
            else:
                params['config'] = dualconf
    return boto3_conn(**params)


def main():
    argument_spec = dict(
        bucket=dict(required=True),
        dest=dict(default=None, type='path'),
        encrypt=dict(default=True, type='bool'),
        encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
        expiry=dict(default=600, type='int', aliases=['expiration']),
        headers=dict(type='dict'),
        marker=dict(default=""),
        max_keys=dict(default=1000, type='int'),
        metadata=dict(type='dict'),
        mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
        object=dict(),
        permission=dict(type='list', default=['private']),
        version=dict(default=None),
        overwrite=dict(aliases=['force'], default='always'),
        prefix=dict(default=""),
        retries=dict(aliases=['retry'], type='int', default=0),
        s3_url=dict(aliases=['S3_URL']),
        dualstack=dict(default='no', type='bool'),
        rgw=dict(default='no', type='bool'),
        src=dict(),
        ignore_nonexistent_bucket=dict(default=False, type='bool'),
        encryption_kms_key_id=dict()
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[['mode', 'put', ['src', 'object']],
                     ['mode', 'get', ['dest', 'object']],
                     ['mode', 'getstr', ['object']],
                     ['mode', 'geturl', ['object']]],
    )

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = module.params.get('expiry')
    dest = module.params.get('dest', '')
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    dualstack = module.params.get('dualstack')
    rgw = module.params.get('rgw')
    src = module.params.get('src')
    ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')

    object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
    bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]

    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    if overwrite == 'different' and not HAS_MD5:
        module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if region in ('us-east-1', '', None):
        # default to US Standard region
        location = 'us-east-1'
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = module.params['object']
        # If there is a top level object, do nothing - if the object starts with /
        # remove the leading character to maintain compatibility with Ansible versions < 2.4
        if obj.startswith('/'):
            obj = obj[1:]

    # Bucket deletion does not require obj. Prevents ambiguity with delobj.
    if obj and mode == "delete":
        module.fail_json(msg='Parameter obj cannot be used with mode=delete')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
        module.fail_json(msg='dualstack only applies to AWS S3')

    if dualstack and not module.botocore_at_least('1.4.45'):
        module.fail_json(msg='dualstack requires botocore >= 1.4.45')

    # rgw requires an explicit url
    if rgw and not s3_url:
        module.fail_json(msg='rgw flavour requires s3_url')

    # Look at s3_url and tweak connection settings
    # if connecting to RGW, Walrus or fakes3
    if s3_url:
        for key in ['validate_certs', 'security_token', 'profile_name']:
            aws_connect_kwargs.pop(key, None)
    s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)

    validate = not ignore_nonexistent_bucket

    # separate types of ACLs
    bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
    object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
    error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
    if error_acl:
        module.fail_json(msg='Unknown permission specified: %s' % error_acl)

    # First, we check to see if the bucket exists, we get "bucket" returned.
    bucketrtn = bucket_check(module, s3, bucket, validate=validate)

    if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
        module.fail_json(msg="Source bucket cannot be found.")

    if mode == 'get':
        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        if keyrtn is False:
            if version:
                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

        if path_check(dest) and overwrite != 'always':
            if overwrite == 'never':
                module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
            if etag_compare(module, dest, s3, bucket, obj, version=version):
                module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)

        try:
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)
        except Sigv4Required:
            s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)

    if mode == 'put':

        # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
        # these were separated into the variables bucket_acl and object_acl above

        if not path_check(src):
            module.fail_json(msg="Local object for PUT does not exist")

        if bucketrtn:
            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        else:
            # If the bucket doesn't exist we should create it.
            # only use valid bucket acls for create_bucket function
            module.params['permission'] = bucket_acl
            create_bucket(module, s3, bucket, location)

        if keyrtn and overwrite != 'always':
            if overwrite == 'never' or etag_compare(module, src, s3, bucket, obj):
                # Return the download URL for the existing object
                get_download_url(module, s3, bucket, obj, expiry, changed=False)

        # only use valid object acls for the upload_s3file function
        module.params['permission'] = object_acl
        upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

    # Delete an object from a bucket, not the entire bucket
    if mode == 'delobj':
        if obj is None:
            module.fail_json(msg="object parameter is required")
        if bucket:
            deletertn = delete_key(module, s3, bucket, obj)
            if deletertn is True:
                module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Delete an entire bucket, including all objects in the bucket
    if mode == 'delete':
        if bucket:
            deletertn = delete_bucket(module, s3, bucket)
            if deletertn is True:
                module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Support for listing a set of keys
    if mode == 'list':
        exists = bucket_check(module, s3, bucket)

        # If the bucket does not exist then bail out
        if not exists:
            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)

        list_keys(module, s3, bucket, prefix, marker, max_keys)

    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
    if mode == 'create':

        # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
        # these were separated above into the variables bucket_acl and object_acl

        if bucket and not obj:
            if bucketrtn:
                module.exit_json(msg="Bucket already exists.", changed=False)
            else:
                # only use valid bucket acls when creating the bucket
                module.params['permission'] = bucket_acl
                module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
        if bucket and obj:
            if obj.endswith('/'):
                dirobj = obj
            else:
                dirobj = obj + "/"
            if bucketrtn:
                if key_check(module, s3, bucket, dirobj):
                    module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
                else:
                    # setting valid object acls for the create_dirkey function
                    module.params['permission'] = object_acl
                    create_dirkey(module, s3, bucket, dirobj, encrypt)
            else:
                # only use valid bucket acls for the create_bucket function
                module.params['permission'] = bucket_acl
                created = create_bucket(module, s3, bucket, location)
                # only use valid object acls for the create_dirkey function
                module.params['permission'] = object_acl
                create_dirkey(module, s3, bucket, dirobj, encrypt)

    # Support for grabbing the time-expired URL for an object in S3/Walrus.
    if mode == 'geturl':
        if not bucket and not obj:
            module.fail_json(msg="Bucket and Object parameters must be set")

        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        if keyrtn:
            get_download_url(module, s3, bucket, obj, expiry)
        else:
            module.fail_json(msg="Key %s does not exist." % obj)

    if mode == 'getstr':
        if bucket and obj:
            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
            if keyrtn:
                try:
                    download_s3str(module, s3, bucket, obj, version=version)
                except Sigv4Required:
                    s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
                    download_s3str(module, s3, bucket, obj, version=version)
            elif version is not None:
                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

    module.exit_json(failed=False)


if __name__ == '__main__':
    main()
@@ -1,237 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: locale_gen
short_description: Creates or removes locales
description:
    - Manages locales by editing /etc/locale.gen and invoking locale-gen.
version_added: "1.6"
author:
    - Augustus Kling (@AugustusKling)
options:
    name:
        description:
            - Name and encoding of the locale, such as "en_GB.UTF-8".
        required: true
    state:
        description:
            - Whether the locale shall be present.
        choices: [ absent, present ]
        default: present
'''

EXAMPLES = '''
- name: Ensure a locale exists
  locale_gen:
    name: de_CH.UTF-8
    state: present
'''

import os
import re
from subprocess import Popen, PIPE, call

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native

LOCALE_NORMALIZATION = {
    ".utf8": ".UTF-8",
    ".eucjp": ".EUC-JP",
    ".iso885915": ".ISO-8859-15",
    ".cp1251": ".CP1251",
    ".koi8r": ".KOI8-R",
    ".armscii8": ".ARMSCII-8",
    ".euckr": ".EUC-KR",
    ".gbk": ".GBK",
    ".gb18030": ".GB18030",
    ".euctw": ".EUC-TW",
}


# ===========================================
# locale module specific support methods.
#

def is_available(name, ubuntuMode):
    """Check if the given locale is available on the system. This is done by
    checking either :
    * if the locale is present in /etc/locale.gen
    * or if the locale is present in /usr/share/i18n/SUPPORTED"""
    if ubuntuMode:
        __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
        __locales_available = '/usr/share/i18n/SUPPORTED'
    else:
        __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
        __locales_available = '/etc/locale.gen'

    re_compiled = re.compile(__regexp)
    fd = open(__locales_available, 'r')
    for line in fd:
        result = re_compiled.match(line)
        if result and result.group('locale') == name:
            return True
    fd.close()
    return False


def is_present(name):
    """Checks if the given locale is currently installed."""
    output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
    output = to_native(output)
    return any(fix_case(name) == fix_case(line) for line in output.splitlines())


def fix_case(name):
    """locale -a might return the encoding in either lower or upper case.
    Passing through this function makes them uniform for comparisons."""
    for s, r in LOCALE_NORMALIZATION.items():
        name = name.replace(s, r)
    return name


def replace_line(existing_line, new_line):
    """Replaces lines in /etc/locale.gen"""
    try:
        f = open("/etc/locale.gen", "r")
        lines = [line.replace(existing_line, new_line) for line in f]
    finally:
        f.close()
    try:
        f = open("/etc/locale.gen", "w")
        f.write("".join(lines))
    finally:
        f.close()


def set_locale(name, enabled=True):
    """ Sets the state of the locale. Defaults to enabled. """
    search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
    if enabled:
        new_string = r'%s \g<charset>' % (name)
    else:
        new_string = r'# %s \g<charset>' % (name)
    try:
        f = open("/etc/locale.gen", "r")
        lines = [re.sub(search_string, new_string, line) for line in f]
    finally:
        f.close()
    try:
        f = open("/etc/locale.gen", "w")
        f.write("".join(lines))
    finally:
        f.close()


def apply_change(targetState, name):
    """Create or remove locale.

    Keyword arguments:
    targetState -- Desired state, either present or absent.
    name -- Name including encoding such as de_CH.UTF-8.
    """
    if targetState == "present":
        # Create locale.
        set_locale(name, enabled=True)
    else:
        # Delete locale.
        set_locale(name, enabled=False)

    localeGenExitValue = call("locale-gen")
    if localeGenExitValue != 0:
        raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue))


def apply_change_ubuntu(targetState, name):
    """Create or remove locale.

    Keyword arguments:
    targetState -- Desired state, either present or absent.
    name -- Name including encoding such as de_CH.UTF-8.
    """
    if targetState == "present":
        # Create locale.
        # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
        localeGenExitValue = call(["locale-gen", name])
    else:
        # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
        try:
            f = open("/var/lib/locales/supported.d/local", "r")
            content = f.readlines()
        finally:
            f.close()
        try:
            f = open("/var/lib/locales/supported.d/local", "w")
            for line in content:
                locale, charset = line.split(' ')
                if locale != name:
                    f.write(line)
        finally:
            f.close()
        # Purge locales and regenerate.
        # Please provide a patch if you know how to avoid regenerating the locales to keep!
        localeGenExitValue = call(["locale-gen", "--purge"])

    if localeGenExitValue != 0:
        raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue))


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        supports_check_mode=True,
    )

    name = module.params['name']
    state = module.params['state']

    if not os.path.exists("/etc/locale.gen"):
        if os.path.exists("/var/lib/locales/supported.d/"):
            # Ubuntu created its own system to manage locales.
            ubuntuMode = True
        else:
            module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
    else:
        # We found the common way to manage locales.
        ubuntuMode = False

    if not is_available(name, ubuntuMode):
        module.fail_json(msg="The locale you've entered is not available "
                             "on your system.")

    if is_present(name):
        prev_state = "present"
    else:
        prev_state = "absent"
    changed = (prev_state != state)

    if module.check_mode:
        module.exit_json(changed=changed)
    else:
        if changed:
            try:
                if ubuntuMode is False:
                    apply_change(state, name)
                else:
                    apply_change_ubuntu(state, name)
            except EnvironmentError as e:
                module.fail_json(msg=to_native(e), exitValue=e.errno)

        module.exit_json(name=name, changed=changed, msg="OK")


if __name__ == '__main__':
    main()
@@ -1,295 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
# Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
author:
- Alexander Bulimov (@abulimov)
module: lvg
short_description: Configure LVM volume groups
description:
  - This module creates, removes or resizes volume groups.
version_added: "1.1"
options:
  vg:
    description:
    - The name of the volume group.
    type: str
    required: true
  pvs:
    description:
    - List of comma-separated devices to use as physical devices in this volume group.
    - Required when creating or resizing volume group.
    - The module will take care of running pvcreate if needed.
    type: list
  pesize:
    description:
    - "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector
      (where the sector size is the largest sector size of the PVs currently used in the VG),
      or at least 128KiB."
    - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
    type: str
    default: "4"
  pv_options:
    description:
    - Additional options to pass to C(pvcreate) when creating the volume group.
    type: str
    version_added: "2.4"
  vg_options:
    description:
    - Additional options to pass to C(vgcreate) when creating the volume group.
    type: str
    version_added: "1.6"
  state:
    description:
    - Control if the volume group exists.
    type: str
    choices: [ absent, present ]
    default: present
  force:
    description:
    - If C(yes), allows to remove volume group with logical volumes.
    type: bool
    default: no
seealso:
- module: filesystem
- module: lvol
- module: parted
notes:
  - This module does not modify PE size for already present volume group.
'''

EXAMPLES = r'''
- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB
  lvg:
    vg: vg.services
    pvs: /dev/sda1
    pesize: 32

- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB
  lvg:
    vg: vg.services
    pvs: /dev/sdb
    pesize: 128K

# If, for example, we already have VG vg.services on top of /dev/sdb1,
# this VG will be extended by /dev/sdc5. Or if vg.services was created on
# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
# and then reduce by /dev/sda5.
- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
  lvg:
    vg: vg.services
    pvs: /dev/sdb1,/dev/sdc5

- name: Remove a volume group with name vg.services
  lvg:
    vg: vg.services
    state: absent
'''

import itertools
import os

from ansible.module_utils.basic import AnsibleModule


def parse_vgs(data):
    vgs = []
    for line in data.splitlines():
        parts = line.strip().split(';')
        vgs.append({
            'name': parts[0],
            'pv_count': int(parts[1]),
            'lv_count': int(parts[2]),
        })
    return vgs


def find_mapper_device_name(module, dm_device):
    dmsetup_cmd = module.get_bin_path('dmsetup', True)
    mapper_prefix = '/dev/mapper/'
    rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
    if rc != 0:
        module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
    mapper_device = mapper_prefix + dm_name.rstrip()
    return mapper_device


def parse_pvs(module, data):
    pvs = []
    dm_prefix = '/dev/dm-'
    for line in data.splitlines():
        parts = line.strip().split(';')
        if parts[0].startswith(dm_prefix):
            parts[0] = find_mapper_device_name(module, parts[0])
        pvs.append({
            'name': parts[0],
            'vg_name': parts[1],
        })
    return pvs


def main():
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            pvs=dict(type='list'),
            pesize=dict(type='str', default='4'),
            pv_options=dict(type='str', default=''),
            vg_options=dict(type='str', default=''),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            force=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    vg = module.params['vg']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    pesize = module.params['pesize']
    pvoptions = module.params['pv_options'].split()
    vgoptions = module.params['vg_options'].split()

    dev_list = []
    if module.params['pvs']:
        dev_list = list(module.params['pvs'])
    elif state == 'present':
        module.fail_json(msg="No physical volumes given.")

    # LVM always uses real paths not symlinks so replace symlinks with actual path
    for idx, dev in enumerate(dev_list):
        dev_list[idx] = os.path.realpath(dev)

    if state == 'present':
        # check given devices
        for test_dev in dev_list:
            if not os.path.exists(test_dev):
                module.fail_json(msg="Device %s not found." % test_dev)

    # get pv list
    pvs_cmd = module.get_bin_path('pvs', True)
    if dev_list:
        pvs_filter_pv_name = ' || '.join(
            'pv_name = {0}'.format(x)
            for x in itertools.chain(dev_list, module.params['pvs'])
        )
        pvs_filter_vg_name = 'vg_name = {0}'.format(vg)
        pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name)
    else:
        pvs_filter = ''
    rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter))
    if rc != 0:
        module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)

    # check pv for devices
    pvs = parse_pvs(module, current_pvs)
    used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg]
    if used_pvs:
        module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name']))

    vgs_cmd = module.get_bin_path('vgs', True)
    rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)

    if rc != 0:
        module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)

    changed = False

    vgs = parse_vgs(current_vgs)

    for test_vg in vgs:
        if test_vg['name'] == vg:
            this_vg = test_vg
            break
    else:
        this_vg = None

    if this_vg is None:
        if state == 'present':
            # create VG
            if module.check_mode:
                changed = True
            else:
                # create PV
                pvcreate_cmd = module.get_bin_path('pvcreate', True)
                for current_dev in dev_list:
                    rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
                vgcreate_cmd = module.get_bin_path('vgcreate')
                rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list)
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err)
    else:
        if state == 'absent':
            if module.check_mode:
                module.exit_json(changed=True)
            else:
                if this_vg['lv_count'] == 0 or force:
                    # remove VG
                    vgremove_cmd = module.get_bin_path('vgremove', True)
                    rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
                    if rc == 0:
                        module.exit_json(changed=True)
                    else:
                        module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err)
                else:
                    module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg))

        # resize VG
        current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
        devs_to_remove = list(set(current_devs) - set(dev_list))
        devs_to_add = list(set(dev_list) - set(current_devs))

        if devs_to_add or devs_to_remove:
            if module.check_mode:
                changed = True
            else:
                if devs_to_add:
                    devs_to_add_string = ' '.join(devs_to_add)
                    # create PV
                    pvcreate_cmd = module.get_bin_path('pvcreate', True)
                    for current_dev in devs_to_add:
                        rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
                        if rc == 0:
                            changed = True
                        else:
                            module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
                    # add PV to our VG
                    vgextend_cmd = module.get_bin_path('vgextend', True)
                    rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)

                # remove some PV from our VG
                if devs_to_remove:
                    devs_to_remove_string = ' '.join(devs_to_remove)
                    vgreduce_cmd = module.get_bin_path('vgreduce', True)
                    rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)

    module.exit_json(changed=changed)


if __name__ == '__main__':
    main()
@ -1,223 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
|
||||
# Sponsored by Infopro Digital. http://www.infopro-digital.com/
|
||||
# Sponsored by E.T.A.I. http://www.etai.fr/
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: mongodb_parameter
|
||||
short_description: Change an administrative parameter on a MongoDB server
|
||||
description:
|
||||
- Change an administrative parameter on a MongoDB server.
|
||||
version_added: "2.1"
|
||||
options:
|
||||
login_user:
|
||||
description:
|
||||
- The MongoDB username used to authenticate with.
|
||||
type: str
|
||||
login_password:
|
||||
description:
|
||||
- The login user's password used to authenticate with.
|
||||
type: str
|
||||
login_host:
|
||||
description:
|
||||
- The host running the database.
|
||||
type: str
|
||||
default: localhost
|
||||
login_port:
|
||||
description:
|
||||
- The MongoDB port to connect to.
|
||||
default: 27017
|
||||
type: int
|
||||
login_database:
|
||||
description:
|
||||
- The database where login credentials are stored.
|
||||
type: str
|
||||
replica_set:
|
||||
description:
|
||||
- Replica set to connect to (automatically connects to primary for writes).
|
||||
type: str
|
||||
ssl:
|
||||
description:
|
||||
- Whether to use an SSL connection when connecting to the database.
|
||||
type: bool
|
||||
default: no
|
||||
param:
|
||||
description:
|
||||
- MongoDB administrative parameter to modify.
|
||||
type: str
|
||||
required: true
|
||||
value:
|
||||
description:
|
||||
- MongoDB administrative parameter value to set.
|
||||
type: str
|
||||
required: true
|
||||
param_type:
|
||||
description:
|
||||
- Define the type of parameter value.
|
||||
default: str
|
||||
type: str
|
||||
choices: [int, str]
|
||||
|
||||
notes:
|
||||
- Requires the pymongo Python package on the remote host, version 2.4.2+.
|
||||
- This can be installed using pip or the OS package manager.
|
||||
- See also U(http://api.mongodb.org/python/current/installation.html)
|
||||
requirements: [ "pymongo" ]
|
||||
author: "Loic Blot (@nerzhul)"
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Set MongoDB syncdelay to 60 (this is an int)
|
||||
mongodb_parameter:
|
||||
param: syncdelay
|
||||
value: 60
|
||||
param_type: int
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
before:
|
||||
description: value before modification
|
||||
returned: success
|
||||
type: str
|
||||
after:
|
||||
description: value after modification
|
||||
returned: success
|
||||
type: str
|
||||
'''
|
||||
|
||||
import os
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from pymongo.errors import ConnectionFailure
|
||||
from pymongo.errors import OperationFailure
|
||||
from pymongo import version as PyMongoVersion
|
||||
from pymongo import MongoClient
|
||||
except ImportError:
|
||||
try: # for older PyMongo 2.2
|
||||
from pymongo import Connection as MongoClient
|
||||
except ImportError:
|
||||
pymongo_found = False
|
||||
else:
|
||||
pymongo_found = True
|
||||
else:
|
||||
pymongo_found = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.six.moves import configparser
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
# =========================================
|
||||
# MongoDB module specific support methods.
|
||||
#
|
||||
|
||||
def load_mongocnf():
|
||||
config = configparser.RawConfigParser()
|
||||
mongocnf = os.path.expanduser('~/.mongodb.cnf')
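# Assumed layout of ~/.mongodb.cnf, based on the keys read below:
# [client]
# user = <username>
# pass = <password>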
|
||||
|
||||
try:
|
||||
config.readfp(open(mongocnf))
|
||||
creds = dict(
|
||||
user=config.get('client', 'user'),
|
||||
password=config.get('client', 'pass')
|
||||
)
|
||||
except (configparser.NoOptionError, IOError):
|
||||
return False
|
||||
|
||||
return creds
|
||||
|
||||
|
||||
# =========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
login_user=dict(default=None),
|
||||
login_password=dict(default=None, no_log=True),
|
||||
login_host=dict(default='localhost'),
|
||||
login_port=dict(default=27017, type='int'),
|
||||
login_database=dict(default=None),
|
||||
replica_set=dict(default=None),
|
||||
param=dict(required=True),
|
||||
value=dict(required=True),
|
||||
param_type=dict(default="str", choices=['str', 'int']),
|
||||
ssl=dict(default=False, type='bool'),
|
||||
)
|
||||
)
|
||||
|
||||
if not pymongo_found:
|
||||
module.fail_json(msg=missing_required_lib('pymongo'))
|
||||
|
||||
login_user = module.params['login_user']
|
||||
login_password = module.params['login_password']
|
||||
login_host = module.params['login_host']
|
||||
login_port = module.params['login_port']
|
||||
login_database = module.params['login_database']
|
||||
|
||||
replica_set = module.params['replica_set']
|
||||
ssl = module.params['ssl']
|
||||
|
||||
param = module.params['param']
|
||||
param_type = module.params['param_type']
|
||||
value = module.params['value']
|
||||
|
||||
# Verify parameter is coherent with specified type
|
||||
try:
|
||||
if param_type == 'int':
|
||||
value = int(value)
|
||||
except ValueError:
|
||||
module.fail_json(msg="value '%s' is not %s" % (value, param_type))
|
||||
|
||||
try:
|
||||
if replica_set:
|
||||
client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
|
||||
else:
|
||||
client = MongoClient(login_host, int(login_port), ssl=ssl)
|
||||
|
||||
if login_user is None and login_password is None:
|
||||
mongocnf_creds = load_mongocnf()
|
||||
if mongocnf_creds is not False:
|
||||
login_user = mongocnf_creds['user']
|
||||
login_password = mongocnf_creds['password']
|
||||
elif login_password is None or login_user is None:
|
||||
module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
|
||||
|
||||
if login_user is not None and login_password is not None:
|
||||
client.admin.authenticate(login_user, login_password, source=login_database)
|
||||
|
||||
except ConnectionFailure as e:
|
||||
module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
db = client.admin
|
||||
|
||||
try:
|
||||
after_value = db.command("setParameter", **{param: value})
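# On success the server reply typically includes the previous value under the "was" key,
# e.g. {'was': 30, 'ok': 1.0}; the check below relies on that.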
|
||||
except OperationFailure as e:
|
||||
module.fail_json(msg="unable to change parameter: %s" % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
if "was" not in after_value:
|
||||
module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.")
|
||||
else:
|
||||
module.exit_json(changed=(value != after_value["was"]), before=after_value["was"],
|
||||
after=value)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,474 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# (c) 2012, Elliott Foster <elliott@fourkitchens.com>
|
||||
# Sponsored by Four Kitchens http://fourkitchens.com.
|
||||
# (c) 2014, Epic Games, Inc.
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: mongodb_user
|
||||
short_description: Adds or removes a user from a MongoDB database
|
||||
description:
|
||||
- Adds or removes a user from a MongoDB database.
|
||||
version_added: "1.1"
|
||||
options:
|
||||
login_user:
|
||||
description:
|
||||
- The MongoDB username used to authenticate with.
|
||||
type: str
|
||||
login_password:
|
||||
description:
|
||||
- The login user's password used to authenticate with.
|
||||
type: str
|
||||
login_host:
|
||||
description:
|
||||
- The host running the database.
|
||||
default: localhost
|
||||
type: str
|
||||
login_port:
|
||||
description:
|
||||
- The MongoDB port to connect to.
|
||||
default: '27017'
|
||||
type: str
|
||||
login_database:
|
||||
version_added: "2.0"
|
||||
description:
|
||||
- The database where login credentials are stored.
|
||||
type: str
|
||||
replica_set:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- Replica set to connect to (automatically connects to primary for writes).
|
||||
type: str
|
||||
database:
|
||||
description:
|
||||
- The name of the database to add/remove the user from.
|
||||
required: true
|
||||
type: str
|
||||
aliases: [db]
|
||||
name:
|
||||
description:
|
||||
- The name of the user to add or remove.
|
||||
required: true
|
||||
aliases: [user]
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- The password to use for the user.
|
||||
type: str
|
||||
aliases: [pass]
|
||||
ssl:
|
||||
version_added: "1.8"
|
||||
description:
|
||||
- Whether to use an SSL connection when connecting to the database.
|
||||
type: bool
|
||||
ssl_cert_reqs:
|
||||
version_added: "2.2"
|
||||
description:
|
||||
- Specifies whether a certificate is required from the other side of the connection,
|
||||
and whether it will be validated if provided.
|
||||
default: CERT_REQUIRED
|
||||
choices: [CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED]
|
||||
type: str
|
||||
roles:
|
||||
version_added: "1.3"
|
||||
type: list
|
||||
elements: raw
|
||||
description:
|
||||
- >
|
||||
The database user roles. Valid values can either be one or more of the following strings:
|
||||
'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase',
|
||||
'dbAdminAnyDatabase'
|
||||
- "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
|
||||
- "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
|
||||
state:
|
||||
description:
|
||||
- The database user state.
|
||||
default: present
|
||||
choices: [absent, present]
|
||||
type: str
|
||||
update_password:
|
||||
default: always
|
||||
choices: [always, on_create]
|
||||
version_added: "2.1"
|
||||
description:
|
||||
- C(always) will update passwords if they differ.
|
||||
- C(on_create) will only set the password for newly created users.
|
||||
type: str
|
||||
|
||||
notes:
|
||||
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. See U(http://api.mongodb.org/python/current/installation.html)
|
||||
requirements: [ "pymongo" ]
|
||||
author:
|
||||
- "Elliott Foster (@elliotttf)"
|
||||
- "Julien Thebault (@Lujeni)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create 'burgers' database user with name 'bob' and password '12345'.
|
||||
mongodb_user:
|
||||
database: burgers
|
||||
name: bob
|
||||
password: 12345
|
||||
state: present
|
||||
|
||||
- name: Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
|
||||
mongodb_user:
|
||||
database: burgers
|
||||
name: bob
|
||||
password: 12345
|
||||
state: present
|
||||
ssl: True
|
||||
|
||||
- name: Delete 'burgers' database user with name 'bob'.
|
||||
mongodb_user:
|
||||
database: burgers
|
||||
name: bob
|
||||
state: absent
|
||||
|
||||
- name: Define more users with various specific roles (if not defined, no roles are assigned, and the user will be added via the pre-MongoDB 2.2 style)
|
||||
mongodb_user:
|
||||
database: burgers
|
||||
name: ben
|
||||
password: 12345
|
||||
roles: read
|
||||
state: present
|
||||
|
||||
- name: Define roles
|
||||
mongodb_user:
|
||||
database: burgers
|
||||
name: jim
|
||||
password: 12345
|
||||
roles: readWrite,dbAdmin,userAdmin
|
||||
state: present
|
||||
|
||||
- name: Define roles
|
||||
mongodb_user:
|
||||
database: burgers
|
||||
name: joe
|
||||
password: 12345
|
||||
roles: readWriteAnyDatabase
|
||||
state: present
|
||||
|
||||
- name: Add a user to database in a replica set, the primary server is automatically discovered and written to
|
||||
mongodb_user:
|
||||
database: burgers
|
||||
name: bob
|
||||
replica_set: belcher
|
||||
password: 12345
|
||||
roles: readWriteAnyDatabase
|
||||
state: present
|
||||
|
||||
# Add a user 'oplog_reader' with read-only access to the 'local' database on the replica set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
# Note that the credentials must be added to the 'admin' database, because the 'local' database is not synchronized and cannot receive user credentials.
# To log in as this user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
# This syntax requires MongoDB 2.6+ and pymongo 2.5+.
|
||||
- name: Roles as a dictionary
|
||||
mongodb_user:
|
||||
login_user: root
|
||||
login_password: root_password
|
||||
database: admin
|
||||
user: oplog_reader
|
||||
password: oplog_reader_password
|
||||
state: present
|
||||
replica_set: belcher
|
||||
roles:
|
||||
- db: local
|
||||
role: read
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
user:
|
||||
description: The name of the user to add or remove.
|
||||
returned: success
|
||||
type: str
|
||||
'''
|
||||
|
||||
import os
|
||||
import ssl as ssl_lib
|
||||
import traceback
|
||||
from ansible.module_utils.compat.version import LooseVersion
|
||||
from operator import itemgetter
|
||||
|
||||
try:
|
||||
from pymongo.errors import ConnectionFailure
|
||||
from pymongo.errors import OperationFailure
|
||||
from pymongo import version as PyMongoVersion
|
||||
from pymongo import MongoClient
|
||||
except ImportError:
|
||||
try: # for older PyMongo 2.2
|
||||
from pymongo import Connection as MongoClient
|
||||
except ImportError:
|
||||
pymongo_found = False
|
||||
else:
|
||||
pymongo_found = True
|
||||
else:
|
||||
pymongo_found = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.six import binary_type, text_type
|
||||
from ansible.module_utils.six.moves import configparser
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
# =========================================
|
||||
# MongoDB module specific support methods.
|
||||
#
|
||||
|
||||
def check_compatibility(module, client):
|
||||
"""Check the compatibility between the driver and the database.
|
||||
|
||||
See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility
|
||||
|
||||
Args:
|
||||
module: Ansible module.
|
||||
client (cursor): Mongodb cursor on admin database.
|
||||
"""
|
||||
loose_srv_version = LooseVersion(client.server_info()['version'])
|
||||
loose_driver_version = LooseVersion(PyMongoVersion)
|
||||
|
||||
if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'):
|
||||
module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)')
|
||||
|
||||
elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version <= LooseVersion('2.8'):
|
||||
module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)')
|
||||
|
||||
elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version <= LooseVersion('2.7'):
|
||||
module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)')
|
||||
|
||||
elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
|
||||
module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)')
|
||||
|
||||
|
||||
def user_find(client, user, db_name):
|
||||
"""Check if the user exists.
|
||||
|
||||
Args:
|
||||
client (cursor): Mongodb cursor on admin database.
|
||||
user (str): User to check.
|
||||
db_name (str): User's database.
|
||||
|
||||
Returns:
|
||||
dict: when user exists, False otherwise.
|
||||
"""
|
||||
for mongo_user in client["admin"].system.users.find():
|
||||
if mongo_user['user'] == user:
|
||||
# NOTE: there is no 'db' field in mongo 2.4.
|
||||
if 'db' not in mongo_user:
|
||||
return mongo_user
|
||||
|
||||
if mongo_user["db"] == db_name:
|
||||
return mongo_user
|
||||
return False
|
||||
|
||||
|
||||
def user_add(module, client, db_name, user, password, roles):
|
||||
# pymongo's user_add is a _create_or_update_user, so we won't know whether the user was created or updated
# without reproducing a lot of the logic in pymongo's database.py
|
||||
db = client[db_name]
|
||||
|
||||
if roles is None:
|
||||
db.add_user(user, password, False)
|
||||
else:
|
||||
db.add_user(user, password, None, roles=roles)
|
||||
|
||||
|
||||
def user_remove(module, client, db_name, user):
|
||||
exists = user_find(client, user, db_name)
|
||||
if exists:
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True, user=user)
|
||||
db = client[db_name]
|
||||
db.remove_user(user)
|
||||
else:
|
||||
module.exit_json(changed=False, user=user)
|
||||
|
||||
|
||||
def load_mongocnf():
|
||||
config = configparser.RawConfigParser()
|
||||
mongocnf = os.path.expanduser('~/.mongodb.cnf')
|
||||
|
||||
try:
|
||||
config.readfp(open(mongocnf))
|
||||
creds = dict(
|
||||
user=config.get('client', 'user'),
|
||||
password=config.get('client', 'pass')
|
||||
)
|
||||
except (configparser.NoOptionError, IOError):
|
||||
return False
|
||||
|
||||
return creds
|
||||
|
||||
|
||||
def check_if_roles_changed(uinfo, roles, db_name):
|
||||
# We must be aware of users which can read the oplog on a replicaset
|
||||
# Such users must have access to the local DB, but since this DB does not store user credentials
# and is not synchronized among replica sets, the user must be stored on the admin DB.
# Therefore their structure is the following:
|
||||
# {
|
||||
# "_id" : "admin.oplog_reader",
|
||||
# "user" : "oplog_reader",
|
||||
# "db" : "admin", # <-- admin DB
|
||||
# "roles" : [
|
||||
# {
|
||||
# "role" : "read",
|
||||
# "db" : "local" # <-- local DB
|
||||
# }
|
||||
# ]
|
||||
# }
|
||||
|
||||
def make_sure_roles_are_a_list_of_dict(roles, db_name):
|
||||
output = list()
|
||||
for role in roles:
|
||||
if isinstance(role, (binary_type, text_type)):
|
||||
new_role = {"role": role, "db": db_name}
|
||||
output.append(new_role)
|
||||
else:
|
||||
output.append(role)
|
||||
return output
|
||||
|
||||
roles_as_list_of_dict = make_sure_roles_are_a_list_of_dict(roles, db_name)
|
||||
uinfo_roles = uinfo.get('roles', [])
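# Compare the two role lists order-insensitively: both sides are sorted by 'db' before comparison,
# so a mere reordering of roles is not reported as a change.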
|
||||
|
||||
if sorted(roles_as_list_of_dict, key=itemgetter('db')) == sorted(uinfo_roles, key=itemgetter('db')):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
# =========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
login_user=dict(default=None),
|
||||
login_password=dict(default=None, no_log=True),
|
||||
login_host=dict(default='localhost'),
|
||||
login_port=dict(default='27017'),
|
||||
login_database=dict(default=None),
|
||||
replica_set=dict(default=None),
|
||||
database=dict(required=True, aliases=['db']),
|
||||
name=dict(required=True, aliases=['user']),
|
||||
password=dict(aliases=['pass'], no_log=True),
|
||||
ssl=dict(default=False, type='bool'),
|
||||
roles=dict(default=None, type='list', elements='raw'),
|
||||
state=dict(default='present', choices=['absent', 'present']),
|
||||
update_password=dict(default="always", choices=["always", "on_create"]),
|
||||
ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
if not pymongo_found:
|
||||
module.fail_json(msg=missing_required_lib('pymongo'))
|
||||
|
||||
login_user = module.params['login_user']
|
||||
login_password = module.params['login_password']
|
||||
login_host = module.params['login_host']
|
||||
login_port = module.params['login_port']
|
||||
login_database = module.params['login_database']
|
||||
|
||||
replica_set = module.params['replica_set']
|
||||
db_name = module.params['database']
|
||||
user = module.params['name']
|
||||
password = module.params['password']
|
||||
ssl = module.params['ssl']
|
||||
roles = module.params['roles'] or []
|
||||
state = module.params['state']
|
||||
update_password = module.params['update_password']
|
||||
|
||||
try:
|
||||
connection_params = {
|
||||
"host": login_host,
|
||||
"port": int(login_port),
|
||||
}
|
||||
|
||||
if replica_set:
|
||||
connection_params["replicaset"] = replica_set
|
||||
|
||||
if ssl:
|
||||
connection_params["ssl"] = ssl
|
||||
connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
|
||||
|
||||
client = MongoClient(**connection_params)
|
||||
|
||||
# NOTE: this check must be done ASAP.
|
||||
# We don't need to be authenticated for this check (that ability was lost in PyMongo 3.6)
|
||||
if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'):
|
||||
check_compatibility(module, client)
|
||||
|
||||
if login_user is None and login_password is None:
|
||||
mongocnf_creds = load_mongocnf()
|
||||
if mongocnf_creds is not False:
|
||||
login_user = mongocnf_creds['user']
|
||||
login_password = mongocnf_creds['password']
|
||||
elif login_password is None or login_user is None:
|
||||
module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
|
||||
|
||||
if login_user is not None and login_password is not None:
|
||||
client.admin.authenticate(login_user, login_password, source=login_database)
|
||||
elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
|
||||
if db_name != "admin":
|
||||
module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
|
||||
# else: this has to be the first admin user added
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
if state == 'present':
|
||||
if password is None and update_password == 'always':
|
||||
module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
|
||||
|
||||
try:
|
||||
if update_password != 'always':
|
||||
uinfo = user_find(client, user, db_name)
|
||||
if uinfo:
|
||||
password = None
|
||||
if not check_if_roles_changed(uinfo, roles, db_name):
|
||||
module.exit_json(changed=False, user=user)
|
||||
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True, user=user)
|
||||
|
||||
user_add(module, client, db_name, user, password, roles)
|
||||
except Exception as e:
|
||||
module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc())
|
||||
finally:
|
||||
try:
|
||||
client.close()
|
||||
except Exception:
|
||||
pass
|
||||
# Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848
|
||||
# newuinfo = user_find(client, user, db_name)
|
||||
# if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
|
||||
# module.exit_json(changed=False, user=user)
|
||||
|
||||
elif state == 'absent':
|
||||
try:
|
||||
user_remove(module, client, db_name, user)
|
||||
except Exception as e:
|
||||
module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc())
|
||||
finally:
|
||||
try:
|
||||
client.close()
|
||||
except Exception:
|
||||
pass
|
||||
module.exit_json(changed=True, user=user)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,89 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright: (c) 2019, Saranya Sridharan
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: pids
|
||||
version_added: 2.8
|
||||
description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines.Returns an empty list if no process in that name exists."
|
||||
short_description: "Retrieves process IDs list if the process is running otherwise return empty list"
|
||||
author:
|
||||
- Saranya Sridharan (@saranyasridharan)
|
||||
requirements:
|
||||
- psutil(python module)
|
||||
options:
|
||||
name:
|
||||
description: The name of the process for which you want to get the PID(s).
|
||||
required: true
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Pass the process name
|
||||
- name: Getting process IDs of the process
|
||||
pids:
|
||||
name: python
|
||||
register: pids_of_python
|
||||
|
||||
- name: Printing the process IDs obtained
|
||||
debug:
|
||||
msg: "PIDS of python:{{pids_of_python.pids|join(',')}}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
pids:
|
||||
description: Process IDs of the given process
|
||||
returned: list of none, one, or more process IDs
|
||||
type: list
|
||||
sample: [100,200]
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
try:
|
||||
import psutil
|
||||
HAS_PSUTIL = True
|
||||
except ImportError:
|
||||
HAS_PSUTIL = False
|
||||
|
||||
|
||||
def compare_lower(a, b):
|
||||
if a is None or b is None:
|
||||
# this could just be "return False" but would lead to surprising behavior if both a and b are None
|
||||
return a == b
|
||||
|
||||
return a.lower() == b.lower()
|
||||
|
||||
|
||||
def get_pid(name):
|
||||
pids = []
|
||||
|
||||
for proc in psutil.process_iter(attrs=['name', 'cmdline']):
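# Match either the reported process name or the first element of its command line, case-insensitively.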
|
||||
if compare_lower(proc.info['name'], name) or \
|
||||
proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name):
|
||||
pids.append(proc.pid)
|
||||
|
||||
return pids
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(required=True, type="str"),
|
||||
),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
if not HAS_PSUTIL:
|
||||
module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil")
|
||||
name = module.params["name"]
|
||||
response = dict(pids=get_pid(name))
|
||||
module.exit_json(**response)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,657 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['stableinterface'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_db
|
||||
short_description: Add or remove PostgreSQL databases from a remote host.
|
||||
description:
|
||||
- Add or remove PostgreSQL databases from a remote host.
|
||||
version_added: '0.6'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the database to add or remove
|
||||
type: str
|
||||
required: true
|
||||
aliases: [ db ]
|
||||
port:
|
||||
description:
|
||||
- Database port to connect (if needed)
|
||||
type: int
|
||||
default: 5432
|
||||
aliases:
|
||||
- login_port
|
||||
owner:
|
||||
description:
|
||||
- Name of the role to set as owner of the database
|
||||
type: str
|
||||
template:
|
||||
description:
|
||||
- Template used to create the database
|
||||
type: str
|
||||
encoding:
|
||||
description:
|
||||
- Encoding of the database
|
||||
type: str
|
||||
lc_collate:
|
||||
description:
|
||||
- Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
|
||||
type: str
|
||||
lc_ctype:
|
||||
description:
|
||||
- Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
|
||||
is used as template.
|
||||
type: str
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
version_added: '2.8'
|
||||
state:
|
||||
description:
|
||||
- The database state.
|
||||
- C(present) implies that the database should be created if necessary.
|
||||
- C(absent) implies that the database should be removed if present.
|
||||
- C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
|
||||
Note that some versions of pg_dump, the embedded PostgreSQL utility used by the module,
return rc 0 even when errors occur (e.g. the connection is forbidden by pg_hba.conf),
so the module reports changed=True although the dump has not actually been done. Please make sure that your version of
pg_dump returns rc 1 in this case.
|
||||
- C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
|
||||
- The format of the backup will be detected based on the target name.
|
||||
- Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
|
||||
- Supported formats for dump and restore include C(.sql) and C(.tar)
|
||||
type: str
|
||||
choices: [ absent, dump, present, restore ]
|
||||
default: present
|
||||
target:
|
||||
description:
|
||||
- File to back up or restore from.
|
||||
- Used when I(state) is C(dump) or C(restore).
|
||||
type: path
|
||||
version_added: '2.4'
|
||||
target_opts:
|
||||
description:
|
||||
- Further arguments for pg_dump or pg_restore.
|
||||
- Used when I(state) is C(dump) or C(restore).
|
||||
type: str
|
||||
version_added: '2.4'
|
||||
maintenance_db:
|
||||
description:
|
||||
- The value specifies the initial database (also called the maintenance DB) that Ansible connects to.
|
||||
type: str
|
||||
default: postgres
|
||||
version_added: '2.5'
|
||||
conn_limit:
|
||||
description:
|
||||
- Specifies the database connection limit.
|
||||
type: str
|
||||
version_added: '2.8'
|
||||
tablespace:
|
||||
description:
|
||||
- The tablespace to set for the database
|
||||
U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
|
||||
- If you want to move the database back to the default tablespace,
|
||||
explicitly set this to pg_default.
|
||||
type: path
|
||||
version_added: '2.9'
|
||||
dump_extra_args:
|
||||
description:
|
||||
- Provides additional arguments when I(state) is C(dump).
|
||||
- Cannot be used with dump-file-format-related arguments like ``--format=d``.
|
||||
type: str
|
||||
version_added: '2.10'
|
||||
seealso:
|
||||
- name: CREATE DATABASE reference
|
||||
description: Complete reference of the CREATE DATABASE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-createdatabase.html
|
||||
- name: DROP DATABASE reference
|
||||
description: Complete reference of the DROP DATABASE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
|
||||
- name: pg_dump reference
|
||||
description: Complete reference of pg_dump documentation.
|
||||
link: https://www.postgresql.org/docs/current/app-pgdump.html
|
||||
- name: pg_restore reference
|
||||
description: Complete reference of pg_restore documentation.
|
||||
link: https://www.postgresql.org/docs/current/app-pgrestore.html
|
||||
- module: postgresql_tablespace
|
||||
- module: postgresql_info
|
||||
- module: postgresql_ping
|
||||
notes:
|
||||
- State C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
|
||||
author: "Ansible Core Team"
|
||||
extends_documentation_fragment:
|
||||
- postgres
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create a new database with name "acme"
|
||||
postgresql_db:
|
||||
name: acme
|
||||
|
||||
# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
|
||||
- name: Create a new database with name "acme" and specific encoding and locale # settings.
|
||||
postgresql_db:
|
||||
name: acme
|
||||
encoding: UTF-8
|
||||
lc_collate: de_DE.UTF-8
|
||||
lc_ctype: de_DE.UTF-8
|
||||
template: template0
|
||||
|
||||
# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
|
||||
- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
|
||||
postgresql_db:
|
||||
name: acme
|
||||
conn_limit: "100"
|
||||
|
||||
- name: Dump an existing database to a file
|
||||
postgresql_db:
|
||||
name: acme
|
||||
state: dump
|
||||
target: /tmp/acme.sql
|
||||
|
||||
- name: Dump an existing database to a file excluding the test table
|
||||
postgresql_db:
|
||||
name: acme
|
||||
state: dump
|
||||
target: /tmp/acme.sql
|
||||
dump_extra_args: --exclude-table=test
|
||||
|
||||
- name: Dump an existing database to a file (with compression)
|
||||
postgresql_db:
|
||||
name: acme
|
||||
state: dump
|
||||
target: /tmp/acme.sql.gz
|
||||
|
||||
- name: Dump a single schema for an existing database
|
||||
postgresql_db:
|
||||
name: acme
|
||||
state: dump
|
||||
target: /tmp/acme.sql
|
||||
target_opts: "-n public"
|
||||
|
||||
# Note: In the example below, if database foo exists and has another tablespace
|
||||
# the tablespace will be changed to foo. Access to the database will be locked
|
||||
# until the copying of database files is finished.
|
||||
- name: Create a new database called foo in tablespace bar
|
||||
postgresql_db:
|
||||
name: foo
|
||||
tablespace: bar
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
executed_commands:
|
||||
description: List of commands which tried to run.
|
||||
returned: always
|
||||
type: list
|
||||
sample: ["CREATE DATABASE acme"]
|
||||
version_added: '2.10'
|
||||
'''
|
||||
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import traceback
|
||||
|
||||
try:
|
||||
import psycopg2
|
||||
import psycopg2.extras
|
||||
except ImportError:
|
||||
HAS_PSYCOPG2 = False
|
||||
else:
|
||||
HAS_PSYCOPG2 = True
|
||||
|
||||
import ansible.module_utils.postgres as pgutils
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
|
||||
from ansible.module_utils.six import iteritems
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
executed_commands = []
|
||||
|
||||
|
||||
class NotSupportedError(Exception):
|
||||
pass
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
|
||||
def set_owner(cursor, db, owner):
|
||||
query = 'ALTER DATABASE %s OWNER TO "%s"' % (
|
||||
pg_quote_identifier(db, 'database'),
|
||||
owner)
|
||||
executed_commands.append(query)
|
||||
cursor.execute(query)
|
||||
return True
|
||||
|
||||
|
||||
def set_conn_limit(cursor, db, conn_limit):
|
||||
query = "ALTER DATABASE %s CONNECTION LIMIT %s" % (
|
||||
pg_quote_identifier(db, 'database'),
|
||||
conn_limit)
|
||||
executed_commands.append(query)
|
||||
cursor.execute(query)
|
||||
return True
|
||||
|
||||
|
||||
def get_encoding_id(cursor, encoding):
|
||||
query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
|
||||
cursor.execute(query, {'encoding': encoding})
|
||||
return cursor.fetchone()['encoding_id']
|
||||
|
||||
|
||||
def get_db_info(cursor, db):
|
||||
query = """
|
||||
SELECT rolname AS owner,
|
||||
pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
|
||||
datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
|
||||
spcname AS tablespace
|
||||
FROM pg_database
|
||||
JOIN pg_roles ON pg_roles.oid = pg_database.datdba
|
||||
JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
|
||||
WHERE datname = %(db)s
|
||||
"""
|
||||
cursor.execute(query, {'db': db})
|
||||
return cursor.fetchone()
|
||||
|
||||
|
||||
def db_exists(cursor, db):
|
||||
query = "SELECT * FROM pg_database WHERE datname=%(db)s"
|
||||
cursor.execute(query, {'db': db})
|
||||
return cursor.rowcount == 1
|
||||
|
||||
|
||||
def db_delete(cursor, db):
|
||||
if db_exists(cursor, db):
|
||||
query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database')
|
||||
executed_commands.append(query)
|
||||
cursor.execute(query)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
|
||||
params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
|
||||
if not db_exists(cursor, db):
|
||||
query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')]
|
||||
if owner:
|
||||
query_fragments.append('OWNER "%s"' % owner)
|
||||
if template:
|
||||
query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database'))
|
||||
if encoding:
|
||||
query_fragments.append('ENCODING %(enc)s')
|
||||
if lc_collate:
|
||||
query_fragments.append('LC_COLLATE %(collate)s')
|
||||
if lc_ctype:
|
||||
query_fragments.append('LC_CTYPE %(ctype)s')
|
||||
if tablespace:
|
||||
query_fragments.append('TABLESPACE %s' % pg_quote_identifier(tablespace, 'tablespace'))
|
||||
if conn_limit:
|
||||
query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
|
||||
query = ' '.join(query_fragments)
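# Illustrative composed statement (assumed values, after the parameters below are filled in):
# CREATE DATABASE "acme" OWNER "bob" TEMPLATE "template0" ENCODING 'UTF-8' LC_COLLATE 'en_US.UTF-8' LC_CTYPE 'en_US.UTF-8' CONNECTION LIMIT 100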
|
||||
executed_commands.append(cursor.mogrify(query, params))
|
||||
cursor.execute(query, params)
|
||||
return True
|
||||
else:
|
||||
db_info = get_db_info(cursor, db)
|
||||
if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
|
||||
raise NotSupportedError(
|
||||
'Changing database encoding is not supported. '
|
||||
'Current encoding: %s' % db_info['encoding']
|
||||
)
|
||||
elif lc_collate and lc_collate != db_info['lc_collate']:
|
||||
raise NotSupportedError(
|
||||
'Changing LC_COLLATE is not supported. '
|
||||
'Current LC_COLLATE: %s' % db_info['lc_collate']
|
||||
)
|
||||
elif lc_ctype and lc_ctype != db_info['lc_ctype']:
|
||||
raise NotSupportedError(
|
||||
'Changing LC_CTYPE is not supported.'
|
||||
'Current LC_CTYPE: %s' % db_info['lc_ctype']
|
||||
)
|
||||
else:
|
||||
changed = False
|
||||
|
||||
if owner and owner != db_info['owner']:
|
||||
changed = set_owner(cursor, db, owner)
|
||||
|
||||
if conn_limit and conn_limit != str(db_info['conn_limit']):
|
||||
changed = set_conn_limit(cursor, db, conn_limit)
|
||||
|
||||
if tablespace and tablespace != db_info['tablespace']:
|
||||
changed = set_tablespace(cursor, db, tablespace)
|
||||
|
||||
return changed
|
||||
|
||||
|
||||
def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
|
||||
if not db_exists(cursor, db):
|
||||
return False
|
||||
else:
|
||||
db_info = get_db_info(cursor, db)
|
||||
if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
|
||||
return False
|
||||
elif lc_collate and lc_collate != db_info['lc_collate']:
|
||||
return False
|
||||
elif lc_ctype and lc_ctype != db_info['lc_ctype']:
|
||||
return False
|
||||
elif owner and owner != db_info['owner']:
|
||||
return False
|
||||
elif conn_limit and conn_limit != str(db_info['conn_limit']):
|
||||
return False
|
||||
elif tablespace and tablespace != db_info['tablespace']:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
|
||||
def db_dump(module, target, target_opts="",
|
||||
db=None,
|
||||
dump_extra_args=None,
|
||||
user=None,
|
||||
password=None,
|
||||
host=None,
|
||||
port=None,
|
||||
**kw):
|
||||
|
||||
flags = login_flags(db, host, port, user, db_prefix=False)
|
||||
cmd = module.get_bin_path('pg_dump', True)
|
||||
comp_prog_path = None
|
||||
|
||||
if os.path.splitext(target)[-1] == '.tar':
|
||||
flags.append(' --format=t')
|
||||
elif os.path.splitext(target)[-1] == '.pgc':
|
||||
flags.append(' --format=c')
|
||||
if os.path.splitext(target)[-1] == '.gz':
|
||||
if module.get_bin_path('pigz'):
|
||||
comp_prog_path = module.get_bin_path('pigz', True)
|
||||
else:
|
||||
comp_prog_path = module.get_bin_path('gzip', True)
|
||||
elif os.path.splitext(target)[-1] == '.bz2':
|
||||
comp_prog_path = module.get_bin_path('bzip2', True)
|
||||
elif os.path.splitext(target)[-1] == '.xz':
|
||||
comp_prog_path = module.get_bin_path('xz', True)
|
||||
|
||||
cmd += "".join(flags)
|
||||
|
||||
if dump_extra_args:
|
||||
cmd += " {0} ".format(dump_extra_args)
|
||||
|
||||
if target_opts:
|
||||
cmd += " {0} ".format(target_opts)
|
||||
|
||||
if comp_prog_path:
|
||||
# Use a fifo to be notified of an error in pg_dump.
# A shell pipe offers no portable way to obtain the exit code of the first command
# in the pipeline.
|
||||
fifo = os.path.join(module.tmpdir, 'pg_fifo')
|
||||
os.mkfifo(fifo)
|
||||
cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
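# Resulting shell command shape (illustrative): "gzip <fifo > /path/dump.sql.gz & pg_dump ... >fifo",
# so the compressor reads from the fifo while pg_dump's own exit status remains observable.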
|
||||
else:
|
||||
cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
|
||||
|
||||
return do_with_password(module, cmd, password)
|
||||
|
||||
|
||||
def db_restore(module, target, target_opts="",
|
||||
db=None,
|
||||
user=None,
|
||||
password=None,
|
||||
host=None,
|
||||
port=None,
|
||||
**kw):
|
||||
|
||||
flags = login_flags(db, host, port, user)
|
||||
comp_prog_path = None
|
||||
cmd = module.get_bin_path('psql', True)
|
||||
|
||||
if os.path.splitext(target)[-1] == '.sql':
|
||||
flags.append(' --file={0}'.format(target))
|
||||
|
||||
elif os.path.splitext(target)[-1] == '.tar':
|
||||
flags.append(' --format=Tar')
|
||||
cmd = module.get_bin_path('pg_restore', True)
|
||||
|
||||
elif os.path.splitext(target)[-1] == '.pgc':
|
||||
flags.append(' --format=Custom')
|
||||
cmd = module.get_bin_path('pg_restore', True)
|
||||
|
||||
elif os.path.splitext(target)[-1] == '.gz':
|
||||
comp_prog_path = module.get_bin_path('zcat', True)
|
||||
|
||||
elif os.path.splitext(target)[-1] == '.bz2':
|
||||
comp_prog_path = module.get_bin_path('bzcat', True)
|
||||
|
||||
elif os.path.splitext(target)[-1] == '.xz':
|
||||
comp_prog_path = module.get_bin_path('xzcat', True)
|
||||
|
||||
cmd += "".join(flags)
|
||||
if target_opts:
|
||||
cmd += " {0} ".format(target_opts)
|
||||
|
||||
if comp_prog_path:
|
||||
env = os.environ.copy()
|
||||
if password:
|
||||
env = {"PGPASSWORD": password}
|
||||
p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
|
||||
(stdout2, stderr2) = p2.communicate()
|
||||
p1.stdout.close()
|
||||
p1.wait()
|
||||
if p1.returncode != 0:
|
||||
stderr1 = p1.stderr.read()
|
||||
return p1.returncode, '', stderr1, 'cmd: ****'
|
||||
else:
|
||||
return p2.returncode, '', stderr2, 'cmd: ****'
|
||||
else:
|
||||
cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
|
||||
|
||||
return do_with_password(module, cmd, password)
|
||||
|
||||
|
||||
def login_flags(db, host, port, user, db_prefix=True):
|
||||
"""
|
||||
returns a list of connection argument strings each prefixed
|
||||
with a space and quoted where necessary to later be combined
|
||||
in a single shell string with `"".join(rv)`
|
||||
|
||||
db_prefix determines if "--dbname" is prefixed to the db argument,
|
||||
since the argument was introduced in 9.3.
|
||||
"""
|
||||
flags = []
|
||||
if db:
|
||||
if db_prefix:
|
||||
flags.append(' --dbname={0}'.format(shlex_quote(db)))
|
||||
else:
|
||||
flags.append(' {0}'.format(shlex_quote(db)))
|
||||
if host:
|
||||
flags.append(' --host={0}'.format(host))
|
||||
if port:
|
||||
flags.append(' --port={0}'.format(port))
|
||||
if user:
|
||||
flags.append(' --username={0}'.format(user))
|
||||
return flags
|
||||
|
||||
|
||||
def do_with_password(module, cmd, password):
|
||||
env = {}
|
||||
if password:
|
||||
env = {"PGPASSWORD": password}
|
||||
executed_commands.append(cmd)
|
||||
rc, stderr, stdout = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
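# Note: module.run_command() returns (rc, stdout, stderr); the middle names above are swapped,
# but the values are passed through in the same positions, so callers still receive (rc, stdout, stderr, cmd).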
|
||||
return rc, stderr, stdout, cmd
|
||||
|
||||
|
||||
def set_tablespace(cursor, db, tablespace):
|
||||
query = "ALTER DATABASE %s SET TABLESPACE %s" % (
|
||||
pg_quote_identifier(db, 'database'),
|
||||
pg_quote_identifier(tablespace, 'tablespace'))
|
||||
executed_commands.append(query)
|
||||
cursor.execute(query)
|
||||
return True
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = pgutils.postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
db=dict(type='str', required=True, aliases=['name']),
|
||||
owner=dict(type='str', default=''),
|
||||
template=dict(type='str', default=''),
|
||||
encoding=dict(type='str', default=''),
|
||||
lc_collate=dict(type='str', default=''),
|
||||
lc_ctype=dict(type='str', default=''),
|
||||
state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
|
||||
target=dict(type='path', default=''),
|
||||
target_opts=dict(type='str', default=''),
|
||||
maintenance_db=dict(type='str', default="postgres"),
|
||||
session_role=dict(type='str'),
|
||||
conn_limit=dict(type='str', default=''),
|
||||
tablespace=dict(type='path', default=''),
|
||||
dump_extra_args=dict(type='str', default=None),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
db = module.params["db"]
|
||||
owner = module.params["owner"]
|
||||
template = module.params["template"]
|
||||
encoding = module.params["encoding"]
|
||||
lc_collate = module.params["lc_collate"]
|
||||
lc_ctype = module.params["lc_ctype"]
|
||||
target = module.params["target"]
|
||||
target_opts = module.params["target_opts"]
|
||||
state = module.params["state"]
|
||||
changed = False
|
||||
maintenance_db = module.params['maintenance_db']
|
||||
session_role = module.params["session_role"]
|
||||
conn_limit = module.params['conn_limit']
|
||||
tablespace = module.params['tablespace']
|
||||
dump_extra_args = module.params['dump_extra_args']
|
||||
|
||||
raw_connection = state in ("dump", "restore")
|
||||
|
||||
if not raw_connection:
|
||||
pgutils.ensure_required_libs(module)
|
||||
|
||||
# To use defaults values, keyword arguments must be absent, so
|
||||
# check which values are empty and don't include in the **kw
|
||||
# dictionary
|
||||
params_map = {
|
||||
"login_host": "host",
|
||||
"login_user": "user",
|
||||
"login_password": "password",
|
||||
"port": "port",
|
||||
"ssl_mode": "sslmode",
|
||||
"ca_cert": "sslrootcert"
|
||||
}
|
||||
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
|
||||
if k in params_map and v != '' and v is not None)
|
||||
|
||||
# If a login_unix_socket is specified, incorporate it here.
|
||||
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
|
||||
|
||||
if is_localhost and module.params["login_unix_socket"] != "":
|
||||
kw["host"] = module.params["login_unix_socket"]
|
||||
|
||||
if target == "":
|
||||
target = "{0}/{1}.sql".format(os.getcwd(), db)
|
||||
target = os.path.expanduser(target)
|
||||
|
||||
if not raw_connection:
|
||||
try:
|
||||
db_connection = psycopg2.connect(database=maintenance_db, **kw)
|
||||
|
||||
# Enable autocommit so we can create databases
|
||||
if psycopg2.__version__ >= '2.4.2':
|
||||
db_connection.autocommit = True
|
||||
else:
|
||||
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
|
||||
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
|
||||
|
||||
except TypeError as e:
|
||||
if 'sslrootcert' in e.args[0]:
|
||||
module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
if session_role:
|
||||
try:
|
||||
cursor.execute('SET ROLE "%s"' % session_role)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
try:
|
||||
if module.check_mode:
|
||||
if state == "absent":
|
||||
changed = db_exists(cursor, db)
|
||||
elif state == "present":
|
||||
changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
|
||||
module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
|
||||
|
||||
if state == "absent":
|
||||
try:
|
||||
changed = db_delete(cursor, db)
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
elif state == "present":
|
||||
try:
|
||||
changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
elif state in ("dump", "restore"):
|
||||
method = state == "dump" and db_dump or db_restore
|
||||
try:
|
||||
if state == 'dump':
|
||||
rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
|
||||
else:
|
||||
rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
|
||||
else:
|
||||
module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
|
||||
executed_commands=executed_commands)
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
except NotSupportedError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
except SystemExit:
|
||||
# Avoid catching this on Python 2.4
|
||||
raise
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,364 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2017, Felix Archambault
|
||||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'supported_by': 'community',
|
||||
'status': ['preview']
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_query
|
||||
short_description: Run PostgreSQL queries
|
||||
description:
|
||||
- Runs arbitrary PostgreSQL queries.
|
||||
- Can run queries from SQL script files.
|
||||
- Does not run against backup files. Use M(postgresql_db) with I(state=restore)
|
||||
to run queries on files made by pg_dump/pg_dumpall utilities.
|
||||
version_added: '2.8'
|
||||
options:
|
||||
query:
|
||||
description:
|
||||
- SQL query to run. Variables can be escaped with psycopg2 syntax
|
||||
U(http://initd.org/psycopg/docs/usage.html).
|
||||
type: str
|
||||
positional_args:
|
||||
description:
|
||||
- List of values to be passed as positional arguments to the query.
|
||||
When the value is a list, it will be converted to PostgreSQL array.
|
||||
- Mutually exclusive with I(named_args).
|
||||
type: list
|
||||
elements: raw
|
||||
named_args:
|
||||
description:
|
||||
- Dictionary of key-value arguments to pass to the query.
|
||||
When the value is a list, it will be converted to PostgreSQL array.
|
||||
- Mutually exclusive with I(positional_args).
|
||||
type: dict
|
||||
path_to_script:
|
||||
description:
|
||||
- Path to SQL script on the remote host.
|
||||
- Returns result of the last query in the script.
|
||||
- Mutually exclusive with I(query).
|
||||
type: path
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting. The specified session_role must
|
||||
be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect to and run queries against.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
autocommit:
|
||||
description:
|
||||
- Execute in autocommit mode when the query can't be run inside a transaction block
|
||||
(e.g., VACUUM).
|
||||
- Mutually exclusive with I(check_mode).
|
||||
type: bool
|
||||
default: no
|
||||
version_added: '2.9'
|
||||
encoding:
|
||||
description:
|
||||
- Set the client encoding for the current session (e.g. C(UTF-8)).
|
||||
- The default is the encoding defined by the database.
|
||||
type: str
|
||||
version_added: '2.10'
|
||||
seealso:
|
||||
- module: postgresql_db
|
||||
author:
|
||||
- Felix Archambault (@archf)
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
- Will Rouesnel (@wrouesnel)
|
||||
extends_documentation_fragment: postgres
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Simple select query to acme db
|
||||
postgresql_query:
|
||||
db: acme
|
||||
query: SELECT version()
|
||||
|
||||
- name: Select query to db acme with positional arguments and non-default credentials
|
||||
postgresql_query:
|
||||
db: acme
|
||||
login_user: django
|
||||
login_password: mysecretpass
|
||||
query: SELECT * FROM acme WHERE id = %s AND story = %s
|
||||
positional_args:
|
||||
- 1
|
||||
- test
|
||||
|
||||
- name: Select query to test_db with named_args
|
||||
postgresql_query:
|
||||
db: test_db
|
||||
query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
|
||||
named_args:
|
||||
id_val: 1
|
||||
story_val: test
|
||||
|
||||
- name: Insert query to test_table in db test_db
|
||||
postgresql_query:
|
||||
db: test_db
|
||||
query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
|
||||
|
||||
- name: Run queries from SQL script using UTF-8 client encoding for session
|
||||
postgresql_query:
|
||||
db: test_db
|
||||
path_to_script: /var/lib/pgsql/test.sql
|
||||
positional_args:
|
||||
- 1
|
||||
encoding: UTF-8
|
||||
|
||||
- name: Example of using autocommit parameter
|
||||
postgresql_query:
|
||||
db: test_db
|
||||
query: VACUUM
|
||||
autocommit: yes
|
||||
|
||||
- name: >
|
||||
Insert data to the column of array type using positional_args.
|
||||
Note that we use quotes here, the same as for passing JSON, etc.
|
||||
postgresql_query:
|
||||
query: INSERT INTO test_table (array_column) VALUES (%s)
|
||||
positional_args:
|
||||
- '{1,2,3}'
|
||||
|
||||
# Pass list and string vars as positional_args
|
||||
- name: Set vars
|
||||
set_fact:
|
||||
my_list:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
my_arr: '{1, 2, 3}'
|
||||
|
||||
- name: Select from test table by passing positional_args as arrays
|
||||
postgresql_query:
|
||||
query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
|
||||
positional_args:
|
||||
- '{{ my_list }}'
|
||||
- '{{ my_arr|string }}'
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
query:
|
||||
description: The query that the module tried to execute.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'SELECT * FROM bar'
|
||||
statusmessage:
|
||||
description: Attribute containing the message returned by the command.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'INSERT 0 1'
|
||||
query_result:
|
||||
description:
|
||||
- List of dictionaries in column:value form representing returned rows.
|
||||
returned: changed
|
||||
type: list
|
||||
sample: [{"Column": "Value1"},{"Column": "Value2"}]
|
||||
rowcount:
|
||||
description: Number of affected rows.
|
||||
returned: changed
|
||||
type: int
|
||||
sample: 5
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# The import is needed for checking 'no results to fetch' in main();
|
||||
# psycopg2 availability will be checked by connect_to_db() from
|
||||
# ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.six import iteritems
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
def list_to_pg_array(elem):
|
||||
"""Convert the passed list to PostgreSQL array
|
||||
represented as a string.
|
||||
|
||||
Args:
|
||||
elem (list): List that needs to be converted.
|
||||
|
||||
Returns:
|
||||
elem (str): String representation of PostgreSQL array.
|
||||
"""
|
||||
elem = str(elem).strip('[]')
|
||||
elem = '{' + elem + '}'
|
||||
return elem
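# Illustrative example (assumption, not part of the original module):
# list_to_pg_array([1, 2, 3]) first stringifies the list to '[1, 2, 3]',
# strips the square brackets and wraps the rest in curly braces, returning
# '{1, 2, 3}', which PostgreSQL accepts as an array literal.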
|
||||
|
||||
|
||||
def convert_elements_to_pg_arrays(obj):
|
||||
"""Convert list elements of the passed object
|
||||
to PostgreSQL arrays represented as strings.
|
||||
|
||||
Args:
|
||||
obj (dict or list): Object whose elements need to be converted.
|
||||
|
||||
Returns:
|
||||
obj (dict or list): Object with converted elements.
|
||||
"""
|
||||
if isinstance(obj, dict):
|
||||
for (key, elem) in iteritems(obj):
|
||||
if isinstance(elem, list):
|
||||
obj[key] = list_to_pg_array(elem)
|
||||
|
||||
elif isinstance(obj, list):
|
||||
for i, elem in enumerate(obj):
|
||||
if isinstance(elem, list):
|
||||
obj[i] = list_to_pg_array(elem)
|
||||
|
||||
return obj
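# Illustrative example (assumption, not part of the original module):
# convert_elements_to_pg_arrays({'ids': [1, 2, 3], 'story': 'test'})
# returns {'ids': '{1, 2, 3}', 'story': 'test'} -- only list values are
# rewritten, all other values are passed through unchanged.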
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
query=dict(type='str'),
|
||||
db=dict(type='str', aliases=['login_db']),
|
||||
positional_args=dict(type='list', elements='raw'),
|
||||
named_args=dict(type='dict'),
|
||||
session_role=dict(type='str'),
|
||||
path_to_script=dict(type='path'),
|
||||
autocommit=dict(type='bool', default=False),
|
||||
encoding=dict(type='str'),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
mutually_exclusive=(('positional_args', 'named_args'),),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
query = module.params["query"]
|
||||
positional_args = module.params["positional_args"]
|
||||
named_args = module.params["named_args"]
|
||||
path_to_script = module.params["path_to_script"]
|
||||
autocommit = module.params["autocommit"]
|
||||
encoding = module.params["encoding"]
|
||||
|
||||
if autocommit and module.check_mode:
|
||||
module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
|
||||
|
||||
if path_to_script and query:
|
||||
module.fail_json(msg="path_to_script is mutually exclusive with query")
|
||||
|
||||
if positional_args:
|
||||
positional_args = convert_elements_to_pg_arrays(positional_args)
|
||||
|
||||
elif named_args:
|
||||
named_args = convert_elements_to_pg_arrays(named_args)
|
||||
|
||||
if path_to_script:
|
||||
try:
|
||||
with open(path_to_script, 'rb') as f:
|
||||
query = to_native(f.read())
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
|
||||
|
||||
conn_params = get_conn_params(module, module.params)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
|
||||
if encoding is not None:
|
||||
db_connection.set_client_encoding(encoding)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
# Prepare args:
|
||||
if module.params.get("positional_args"):
|
||||
arguments = module.params["positional_args"]
|
||||
elif module.params.get("named_args"):
|
||||
arguments = module.params["named_args"]
|
||||
else:
|
||||
arguments = None
|
||||
|
||||
# Set defaults:
|
||||
changed = False
|
||||
|
||||
# Execute query:
|
||||
try:
|
||||
cursor.execute(query, arguments)
|
||||
except Exception as e:
|
||||
if not autocommit:
|
||||
db_connection.rollback()
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (query, arguments, to_native(e)))
|
||||
|
||||
statusmessage = cursor.statusmessage
|
||||
rowcount = cursor.rowcount
|
||||
|
||||
try:
|
||||
query_result = [dict(row) for row in cursor.fetchall()]
|
||||
except Psycopg2ProgrammingError as e:
|
||||
if to_native(e) == 'no results to fetch':
|
||||
query_result = {}
else:
module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
|
||||
|
||||
if 'SELECT' not in statusmessage:
|
||||
if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
|
||||
s = statusmessage.split()
|
||||
if len(s) == 3:
|
||||
if statusmessage.split()[2] != '0':
|
||||
changed = True
|
||||
|
||||
elif len(s) == 2:
|
||||
if statusmessage.split()[1] != '0':
|
||||
changed = True
|
||||
|
||||
else:
|
||||
changed = True
|
||||
|
||||
else:
|
||||
changed = True
|
||||
|
||||
if module.check_mode:
|
||||
db_connection.rollback()
|
||||
else:
|
||||
if not autocommit:
|
||||
db_connection.commit()
|
||||
|
||||
kw = dict(
|
||||
changed=changed,
|
||||
query=cursor.query,
|
||||
statusmessage=statusmessage,
|
||||
query_result=query_result,
|
||||
rowcount=rowcount if rowcount >= 0 else 0,
|
||||
)
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
module.exit_json(**kw)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,434 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_set
|
||||
short_description: Change a PostgreSQL server configuration parameter
|
||||
description:
|
||||
- Allows changing a PostgreSQL server configuration parameter.
|
||||
- The module uses the ALTER SYSTEM command and applies changes by reloading the server configuration.
|
||||
- ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
|
||||
- It can be more convenient and safer than the traditional method of manually editing the postgresql.conf file.
|
||||
- ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
|
||||
which is read in addition to postgresql.conf.
|
||||
- The module allows resetting a parameter to boot_val (the cluster's initial value) with I(reset=yes), or removing the parameter
|
||||
string from postgresql.auto.conf and reloading the server configuration with I(value=default) (for settings with postmaster context a restart is required).
|
||||
- After the change you can see in the Ansible output the previous and
|
||||
the new parameter values and other information using the returned values and the M(debug) module.
|
||||
version_added: '2.8'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of PostgreSQL server parameter.
|
||||
type: str
|
||||
required: true
|
||||
value:
|
||||
description:
|
||||
- Parameter value to set.
|
||||
- To remove the parameter string from postgresql.auto.conf and
|
||||
reload the server configuration, you must pass I(value=default).
|
||||
With I(value=default) the task always returns C(changed=true).
|
||||
type: str
|
||||
reset:
|
||||
description:
|
||||
- Restore the parameter to its initial state (boot_val). Mutually exclusive with I(value).
|
||||
type: bool
|
||||
default: false
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting. The specified session_role must
|
||||
be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of the database to connect to.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
notes:
|
||||
- PostgreSQL 9.4 and later is supported.
|
||||
- Pay attention that changing a setting with 'postmaster' context can report C(changed=true)
|
||||
when actually nothing changes, because the same value may be presented in
|
||||
several different forms, for example 1024MB, 1GB, etc. However, in the pg_settings
|
||||
system view it can be stored differently, for example as 131072 8kB pages.
|
||||
The final check of the parameter value cannot compare them because the server was
|
||||
not restarted and the value in pg_settings is not updated yet.
|
||||
- For some parameters restart of PostgreSQL server is required.
|
||||
See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
|
||||
seealso:
|
||||
- module: postgresql_info
|
||||
- name: PostgreSQL server configuration
|
||||
description: General information about PostgreSQL server configuration.
|
||||
link: https://www.postgresql.org/docs/current/runtime-config.html
|
||||
- name: PostgreSQL view pg_settings reference
|
||||
description: Complete reference of the pg_settings view documentation.
|
||||
link: https://www.postgresql.org/docs/current/view-pg-settings.html
|
||||
- name: PostgreSQL ALTER SYSTEM command reference
|
||||
description: Complete reference of the ALTER SYSTEM command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-altersystem.html
|
||||
author:
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
extends_documentation_fragment: postgres
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Restore wal_keep_segments parameter to initial state
|
||||
postgresql_set:
|
||||
name: wal_keep_segments
|
||||
reset: yes
|
||||
|
||||
# Set the work_mem parameter to 32MB and show what has changed and whether a restart is required
|
||||
# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
|
||||
- name: Set work_mem parameter
|
||||
postgresql_set:
|
||||
name: work_mem
|
||||
value: 32mb
|
||||
register: set
|
||||
|
||||
- debug:
|
||||
msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
|
||||
when: set.changed
|
||||
# Note that a restart of the PostgreSQL server may be required for some parameters.
|
||||
# In this situation you see the same value in prev_val_pretty and value_pretty, but 'changed=True'
|
||||
# (if you passed a value that was different from the current server setting).
|
||||
|
||||
- name: Set log_min_duration_statement parameter to 1 second
|
||||
postgresql_set:
|
||||
name: log_min_duration_statement
|
||||
value: 1s
|
||||
|
||||
- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
|
||||
postgresql_set:
|
||||
name: wal_log_hints
|
||||
value: default
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
name:
|
||||
description: Name of PostgreSQL server parameter.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'shared_buffers'
|
||||
restart_required:
|
||||
description: Whether a restart of the PostgreSQL server is required to apply the changed value.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
prev_val_pretty:
|
||||
description: Information about previous state of the parameter.
|
||||
returned: always
|
||||
type: str
|
||||
sample: '4MB'
|
||||
value_pretty:
|
||||
description: Information about current state of the parameter.
|
||||
returned: always
|
||||
type: str
|
||||
sample: '64MB'
|
||||
value:
|
||||
description:
|
||||
- Dictionary that contains the current parameter value (at the time of playbook finish).
|
||||
- Note that for some parameters a restart of the PostgreSQL server is required for the change to take effect.
|
||||
- Returns the current value in the check mode.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: { "value": 67108864, "unit": "b" }
|
||||
context:
|
||||
description:
|
||||
- PostgreSQL setting context.
|
||||
returned: always
|
||||
type: str
|
||||
sample: user
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except Exception:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
PG_REQ_VER = 90400
|
||||
|
||||
# To allow setting values like 1mb instead of 1MB, etc.:
|
||||
POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
|
||||
def param_get(cursor, module, name):
|
||||
query = ("SELECT name, setting, unit, context, boot_val "
|
||||
"FROM pg_settings WHERE name = %(name)s")
|
||||
try:
|
||||
cursor.execute(query, {'name': name})
|
||||
info = cursor.fetchall()
|
||||
cursor.execute("SHOW %s" % name)
|
||||
val = cursor.fetchone()
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
|
||||
|
||||
raw_val = info[0][1]
|
||||
unit = info[0][2]
|
||||
context = info[0][3]
|
||||
boot_val = info[0][4]
|
||||
|
||||
if val[0] == 'True':
|
||||
val[0] = 'on'
|
||||
elif val[0] == 'False':
|
||||
val[0] = 'off'
|
||||
|
||||
if unit == 'kB':
|
||||
if int(raw_val) > 0:
|
||||
raw_val = int(raw_val) * 1024
|
||||
if int(boot_val) > 0:
|
||||
boot_val = int(boot_val) * 1024
|
||||
|
||||
unit = 'b'
|
||||
|
||||
elif unit == 'MB':
|
||||
if int(raw_val) > 0:
|
||||
raw_val = int(raw_val) * 1024 * 1024
|
||||
if int(boot_val) > 0:
|
||||
boot_val = int(boot_val) * 1024 * 1024
|
||||
|
||||
unit = 'b'
|
||||
|
||||
return (val[0], raw_val, unit, boot_val, context)
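# Illustrative note (assumption, not part of the original module): param_get()
# returns a (current value, raw value, unit, boot value, context) tuple, and
# kB/MB raw values are normalized to bytes; for example, for work_mem
# (unit 'kB') with setting 4096 the raw value becomes 4096 * 1024 = 4194304
# and the unit becomes 'b'.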
|
||||
|
||||
|
||||
def pretty_to_bytes(pretty_val):
|
||||
# The function returns a value in bytes
|
||||
# if the value contains 'B', 'kB', 'MB', 'GB', 'TB'.
|
||||
# Otherwise it returns the passed argument.
|
||||
|
||||
val_in_bytes = None
|
||||
|
||||
if 'kB' in pretty_val:
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part * 1024
|
||||
|
||||
elif 'MB' in pretty_val.upper():
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part * 1024 * 1024
|
||||
|
||||
elif 'GB' in pretty_val.upper():
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part * 1024 * 1024 * 1024
|
||||
|
||||
elif 'TB' in pretty_val.upper():
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
|
||||
|
||||
elif 'B' in pretty_val.upper():
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part
|
||||
|
||||
else:
|
||||
return pretty_val
|
||||
|
||||
return val_in_bytes
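# Illustrative examples (assumption, not part of the original module):
# pretty_to_bytes('512kB') -> 524288, pretty_to_bytes('4MB') -> 4194304,
# pretty_to_bytes('1GB') -> 1073741824; a value without a recognized unit,
# such as '100', is returned unchanged.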
|
||||
|
||||
|
||||
def param_set(cursor, module, name, value, context):
|
||||
try:
|
||||
if str(value).lower() == 'default':
|
||||
query = "ALTER SYSTEM SET %s = DEFAULT" % name
|
||||
else:
|
||||
query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
|
||||
cursor.execute(query)
|
||||
|
||||
if context != 'postmaster':
|
||||
cursor.execute("SELECT pg_reload_conf()")
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
|
||||
|
||||
return True
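# Illustrative note (assumption, not part of the original module): for
# name='work_mem' and value='64MB' the function issues
#   ALTER SYSTEM SET work_mem = '64MB'
# followed by SELECT pg_reload_conf(), while value='default' produces
#   ALTER SYSTEM SET work_mem = DEFAULT
# For parameters with 'postmaster' context the configuration is not
# reloaded because a server restart is required anyway.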
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
name=dict(type='str', required=True),
|
||||
db=dict(type='str', aliases=['login_db']),
|
||||
value=dict(type='str'),
|
||||
reset=dict(type='bool'),
|
||||
session_role=dict(type='str'),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
name = module.params["name"]
|
||||
value = module.params["value"]
|
||||
reset = module.params["reset"]
|
||||
|
||||
# Allow passing values like 1mb instead of 1MB, etc.:
|
||||
if value:
|
||||
for unit in POSSIBLE_SIZE_UNITS:
|
||||
if value[:-2].isdigit() and unit in value[-2:]:
|
||||
value = value.upper()
|
||||
|
||||
if value and reset:
|
||||
module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)
|
||||
|
||||
if not value and not reset:
|
||||
module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)
|
||||
|
||||
conn_params = get_conn_params(module, module.params, warn_db_default=False)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=True)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
kw = {}
|
||||
# Check server version (needs 9.4 or later):
|
||||
ver = db_connection.server_version
|
||||
if ver < PG_REQ_VER:
|
||||
module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
|
||||
kw = dict(
|
||||
changed=False,
|
||||
restart_required=False,
|
||||
value_pretty="",
|
||||
prev_val_pretty="",
|
||||
value={"value": "", "unit": ""},
|
||||
)
|
||||
kw['name'] = name
|
||||
db_connection.close()
|
||||
module.exit_json(**kw)
|
||||
|
||||
# Set default returned values:
|
||||
restart_required = False
|
||||
changed = False
|
||||
kw['name'] = name
|
||||
kw['restart_required'] = False
|
||||
|
||||
# Get info about param state:
|
||||
res = param_get(cursor, module, name)
|
||||
current_value = res[0]
|
||||
raw_val = res[1]
|
||||
unit = res[2]
|
||||
boot_val = res[3]
|
||||
context = res[4]
|
||||
|
||||
if value == 'True':
|
||||
value = 'on'
|
||||
elif value == 'False':
|
||||
value = 'off'
|
||||
|
||||
kw['prev_val_pretty'] = current_value
|
||||
kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
|
||||
kw['context'] = context
|
||||
|
||||
# Do job
|
||||
if context == "internal":
|
||||
module.fail_json(msg="%s: cannot be changed (internal context). See "
|
||||
"https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)
|
||||
|
||||
if context == "postmaster":
|
||||
restart_required = True
|
||||
|
||||
# If check_mode, just compare and exit:
|
||||
if module.check_mode:
|
||||
if pretty_to_bytes(value) == pretty_to_bytes(current_value):
|
||||
kw['changed'] = False
|
||||
|
||||
else:
|
||||
kw['value_pretty'] = value
|
||||
kw['changed'] = True
|
||||
|
||||
# Always return the current raw value in check_mode:
|
||||
kw['value'] = dict(
|
||||
value=raw_val,
|
||||
unit=unit,
|
||||
)
|
||||
kw['restart_required'] = restart_required
|
||||
module.exit_json(**kw)
|
||||
|
||||
# Set param:
|
||||
if value and value != current_value:
|
||||
changed = param_set(cursor, module, name, value, context)
|
||||
|
||||
kw['value_pretty'] = value
|
||||
|
||||
# Reset param:
|
||||
elif reset:
|
||||
if raw_val == boot_val:
|
||||
# nothing to change, exit:
|
||||
kw['value'] = dict(
|
||||
value=raw_val,
|
||||
unit=unit,
|
||||
)
|
||||
module.exit_json(**kw)
|
||||
|
||||
changed = param_set(cursor, module, name, boot_val, context)
|
||||
|
||||
if restart_required:
|
||||
module.warn("Restart of PostgreSQL is required for setting %s" % name)
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
# Reconnect and recheck current value:
|
||||
if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=True)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
res = param_get(cursor, module, name)
|
||||
# f_ means 'final'
|
||||
f_value = res[0]
|
||||
f_raw_val = res[1]
|
||||
|
||||
if raw_val == f_raw_val:
|
||||
changed = False
|
||||
|
||||
else:
|
||||
changed = True
|
||||
|
||||
kw['value_pretty'] = f_value
|
||||
kw['value'] = dict(
|
||||
value=f_raw_val,
|
||||
unit=unit,
|
||||
)
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
kw['changed'] = changed
|
||||
kw['restart_required'] = restart_required
|
||||
module.exit_json(**kw)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,601 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_table
|
||||
short_description: Create, drop, or modify a PostgreSQL table
|
||||
description:
|
||||
- Allows creating, dropping, renaming, or truncating a table, or changing some table attributes.
|
||||
version_added: '2.8'
|
||||
options:
|
||||
table:
|
||||
description:
|
||||
- Table name.
|
||||
required: true
|
||||
aliases:
|
||||
- name
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
|
||||
I(like), I(including), I(columns), I(truncate), I(storage_params), and I(rename).
|
||||
type: str
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
tablespace:
|
||||
description:
|
||||
- Set a tablespace for the table.
|
||||
required: false
|
||||
type: str
|
||||
owner:
|
||||
description:
|
||||
- Set a table owner.
|
||||
type: str
|
||||
unlogged:
|
||||
description:
|
||||
- Create an unlogged table.
|
||||
type: bool
|
||||
default: no
|
||||
like:
|
||||
description:
|
||||
- Create a table like another table (with similar DDL).
|
||||
Mutually exclusive with I(columns), I(rename), and I(truncate).
|
||||
type: str
|
||||
including:
|
||||
description:
|
||||
- Keywords that are used with the like parameter; may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS, or ALL.
|
||||
Needs I(like) specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
|
||||
type: str
|
||||
columns:
|
||||
description:
|
||||
- List of column definitions for the table (see examples).
|
||||
type: list
|
||||
elements: str
|
||||
rename:
|
||||
description:
|
||||
- New table name. Mutually exclusive with I(tablespace), I(owner),
|
||||
I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
|
||||
type: str
|
||||
truncate:
|
||||
description:
|
||||
- Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
|
||||
I(like), I(including), I(columns), I(rename), and I(storage_params).
|
||||
type: bool
|
||||
default: no
|
||||
storage_params:
|
||||
description:
|
||||
- Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
|
||||
Mutually exclusive with I(rename) and I(truncate).
|
||||
type: list
|
||||
elements: str
|
||||
db:
|
||||
description:
|
||||
- Name of the database to connect to and where the table will be created.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting.
|
||||
The specified session_role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
cascade:
|
||||
description:
|
||||
- Automatically drop objects that depend on the table (such as views).
|
||||
Used with I(state=absent) only.
|
||||
type: bool
|
||||
default: no
|
||||
version_added: '2.9'
|
||||
notes:
|
||||
- If you do not pass the db parameter, tables will be created in the database
|
||||
named postgres.
|
||||
- PostgreSQL allows creating a columnless table, so the columns parameter is optional.
|
||||
- Unlogged tables are available from PostgreSQL server version 9.1.
|
||||
seealso:
|
||||
- module: postgresql_sequence
|
||||
- module: postgresql_idx
|
||||
- module: postgresql_info
|
||||
- module: postgresql_tablespace
|
||||
- module: postgresql_owner
|
||||
- module: postgresql_privs
|
||||
- module: postgresql_copy
|
||||
- name: CREATE TABLE reference
|
||||
description: Complete reference of the CREATE TABLE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-createtable.html
|
||||
- name: ALTER TABLE reference
|
||||
description: Complete reference of the ALTER TABLE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-altertable.html
|
||||
- name: DROP TABLE reference
|
||||
description: Complete reference of the DROP TABLE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-droptable.html
|
||||
- name: PostgreSQL data types
|
||||
description: Complete reference of the PostgreSQL data types documentation.
|
||||
link: https://www.postgresql.org/docs/current/datatype.html
|
||||
author:
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
extends_documentation_fragment: postgres
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
|
||||
postgresql_table:
|
||||
db: acme
|
||||
name: tbl2
|
||||
like: tbl1
|
||||
owner: testuser
|
||||
|
||||
- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
|
||||
postgresql_table:
|
||||
db: acme
|
||||
table: tbl2
|
||||
like: tbl1
|
||||
including: comments, indexes
|
||||
tablespace: ssd
|
||||
|
||||
- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
|
||||
postgresql_table:
|
||||
name: test_table
|
||||
columns:
|
||||
- id bigserial primary key
|
||||
- num bigint
|
||||
- stories text
|
||||
tablespace: ssd
|
||||
storage_params:
|
||||
- fillfactor=10
|
||||
- autovacuum_analyze_threshold=1
|
||||
|
||||
- name: Create an unlogged table in schema acme
|
||||
postgresql_table:
|
||||
name: acme.useless_data
|
||||
columns: waste_id int
|
||||
unlogged: true
|
||||
|
||||
- name: Rename table foo to bar
|
||||
postgresql_table:
|
||||
table: foo
|
||||
rename: bar
|
||||
|
||||
- name: Rename table foo from schema acme to bar
|
||||
postgresql_table:
|
||||
name: acme.foo
|
||||
rename: bar
|
||||
|
||||
- name: Set owner to someuser
|
||||
postgresql_table:
|
||||
name: foo
|
||||
owner: someuser
|
||||
|
||||
- name: Change tablespace of foo table to new_tablespace and set owner to new_user
|
||||
postgresql_table:
|
||||
name: foo
|
||||
tablespace: new_tablespace
|
||||
owner: new_user
|
||||
|
||||
- name: Truncate table foo
|
||||
postgresql_table:
|
||||
name: foo
|
||||
truncate: yes
|
||||
|
||||
- name: Drop table foo from schema acme
|
||||
postgresql_table:
|
||||
name: acme.foo
|
||||
state: absent
|
||||
|
||||
- name: Drop table bar cascade
|
||||
postgresql_table:
|
||||
name: bar
|
||||
state: absent
|
||||
cascade: yes
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
table:
|
||||
description: Name of a table.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'foo'
|
||||
state:
|
||||
description: Table state.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'present'
|
||||
owner:
|
||||
description: Table owner.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'postgres'
|
||||
tablespace:
|
||||
description: Tablespace.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'ssd_tablespace'
|
||||
queries:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
|
||||
storage_params:
|
||||
description: Storage parameters.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.database import pg_quote_identifier
|
||||
from ansible.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
exec_sql,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
class Table(object):
|
||||
def __init__(self, name, module, cursor):
|
||||
self.name = name
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.info = {
|
||||
'owner': '',
|
||||
'tblspace': '',
|
||||
'storage_params': [],
|
||||
}
|
||||
self.exists = False
|
||||
self.__exists_in_db()
|
||||
self.executed_queries = []
|
||||
|
||||
def get_info(self):
|
||||
"""Getter to refresh and get table info"""
|
||||
self.__exists_in_db()
|
||||
|
||||
def __exists_in_db(self):
|
||||
"""Check table exists and refresh info"""
|
||||
if "." in self.name:
|
||||
schema = self.name.split('.')[-2]
|
||||
tblname = self.name.split('.')[-1]
|
||||
else:
|
||||
schema = 'public'
|
||||
tblname = self.name
|
||||
|
||||
query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
|
||||
"FROM pg_tables AS t "
|
||||
"INNER JOIN pg_class AS c ON c.relname = t.tablename "
|
||||
"INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
|
||||
"WHERE t.tablename = %(tblname)s "
|
||||
"AND n.nspname = %(schema)s")
|
||||
res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
|
||||
add_to_executed=False)
|
||||
if res:
|
||||
self.exists = True
|
||||
self.info = dict(
|
||||
owner=res[0][0],
|
||||
tblspace=res[0][1] if res[0][1] else '',
|
||||
storage_params=res[0][2] if res[0][2] else [],
|
||||
)
|
||||
|
||||
return True
|
||||
else:
|
||||
self.exists = False
|
||||
return False
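# Illustrative note (assumption, not part of the original module): a name
# such as 'acme.foo' is split into schema 'acme' and table 'foo', while a
# plain 'foo' is looked up in the 'public' schema.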
|
||||
|
||||
def create(self, columns='', params='', tblspace='',
|
||||
unlogged=False, owner=''):
|
||||
"""
|
||||
Create table.
|
||||
If table exists, check passed args (params, tblspace, owner) and,
|
||||
if they're different from current, change them.
|
||||
Arguments:
|
||||
params - storage params (passed by "WITH (...)" in SQL),
|
||||
comma separated.
|
||||
tblspace - tablespace.
|
||||
owner - table owner.
|
||||
unlogged - create unlogged table.
|
||||
columns - column string (comma separated).
|
||||
"""
|
||||
name = pg_quote_identifier(self.name, 'table')
|
||||
|
||||
changed = False
|
||||
|
||||
if self.exists:
|
||||
if tblspace == 'pg_default' and self.info['tblspace'] is None:
|
||||
pass # Because they have the same meaning
|
||||
elif tblspace and self.info['tblspace'] != tblspace:
|
||||
self.set_tblspace(tblspace)
|
||||
changed = True
|
||||
|
||||
if owner and self.info['owner'] != owner:
|
||||
self.set_owner(owner)
|
||||
changed = True
|
||||
|
||||
if params:
|
||||
param_list = [p.strip(' ') for p in params.split(',')]
|
||||
|
||||
new_param = False
|
||||
for p in param_list:
|
||||
if p not in self.info['storage_params']:
|
||||
new_param = True
|
||||
|
||||
if new_param:
|
||||
self.set_stor_params(params)
|
||||
changed = True
|
||||
|
||||
if changed:
|
||||
return True
|
||||
return False
|
||||
|
||||
query = "CREATE"
|
||||
if unlogged:
|
||||
query += " UNLOGGED TABLE %s" % name
|
||||
else:
|
||||
query += " TABLE %s" % name
|
||||
|
||||
if columns:
|
||||
query += " (%s)" % columns
|
||||
else:
|
||||
query += " ()"
|
||||
|
||||
if params:
|
||||
query += " WITH (%s)" % params
|
||||
|
||||
if tblspace:
|
||||
query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database')
|
||||
|
||||
if exec_sql(self, query, ddl=True):
|
||||
changed = True
|
||||
|
||||
if owner:
|
||||
changed = self.set_owner(owner)
|
||||
|
||||
return changed
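# Illustrative example (assumption, not part of the original module): with
# columns='id bigserial primary key, num bigint', params='fillfactor=10'
# and tblspace='ssd' the generated statement looks like
#   CREATE TABLE "test_table" (id bigserial primary key, num bigint)
#   WITH (fillfactor=10) TABLESPACE "ssd"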
|
||||
|
||||
def create_like(self, src_table, including='', tblspace='',
|
||||
unlogged=False, params='', owner=''):
|
||||
"""
|
||||
Create table like another table (with similar DDL).
|
||||
Arguments:
|
||||
src_table - source table.
|
||||
including - corresponds to optional INCLUDING expression
|
||||
in CREATE TABLE ... LIKE statement.
|
||||
params - storage params (passed by "WITH (...)" in SQL),
|
||||
comma separated.
|
||||
tblspace - tablespace.
|
||||
owner - table owner.
|
||||
unlogged - create unlogged table.
|
||||
"""
|
||||
changed = False
|
||||
|
||||
name = pg_quote_identifier(self.name, 'table')
|
||||
|
||||
query = "CREATE"
|
||||
if unlogged:
|
||||
query += " UNLOGGED TABLE %s" % name
|
||||
else:
|
||||
query += " TABLE %s" % name
|
||||
|
||||
query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')
|
||||
|
||||
if including:
|
||||
including = including.split(',')
|
||||
for i in including:
|
||||
query += " INCLUDING %s" % i
|
||||
|
||||
query += ')'
|
||||
|
||||
if params:
|
||||
query += " WITH (%s)" % params
|
||||
|
||||
if tblspace:
|
||||
query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database')
|
||||
|
||||
if exec_sql(self, query, ddl=True):
|
||||
changed = True
|
||||
|
||||
if owner:
|
||||
changed = self.set_owner(owner)
|
||||
|
||||
return changed
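# Illustrative example (assumption, not part of the original module): with
# src_table='tbl1' and including='comments, indexes' the generated
# statement looks like
#   CREATE TABLE "tbl2" (LIKE "tbl1" INCLUDING comments INCLUDING indexes)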
|
||||
|
||||
def truncate(self):
|
||||
query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
|
||||
return exec_sql(self, query, ddl=True)
|
||||
|
||||
def rename(self, newname):
|
||||
query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
|
||||
pg_quote_identifier(newname, 'table'))
|
||||
return exec_sql(self, query, ddl=True)
|
||||
|
||||
def set_owner(self, username):
|
||||
query = "ALTER TABLE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'table'),
|
||||
pg_quote_identifier(username, 'role'))
|
||||
return exec_sql(self, query, ddl=True)
|
||||
|
||||
def drop(self, cascade=False):
|
||||
if not self.exists:
|
||||
return False
|
||||
|
||||
query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
|
||||
if cascade:
|
||||
query += " CASCADE"
|
||||
return exec_sql(self, query, ddl=True)
|
||||
|
||||
def set_tblspace(self, tblspace):
|
||||
query = "ALTER TABLE %s SET TABLESPACE %s" % (pg_quote_identifier(self.name, 'table'),
|
||||
pg_quote_identifier(tblspace, 'database'))
|
||||
return exec_sql(self, query, ddl=True)
|
||||
|
||||
def set_stor_params(self, params):
|
||||
query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
|
||||
return exec_sql(self, query, ddl=True)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
table=dict(type='str', required=True, aliases=['name']),
|
||||
state=dict(type='str', default="present", choices=["absent", "present"]),
|
||||
db=dict(type='str', default='', aliases=['login_db']),
|
||||
tablespace=dict(type='str'),
|
||||
owner=dict(type='str'),
|
||||
unlogged=dict(type='bool', default=False),
|
||||
like=dict(type='str'),
|
||||
including=dict(type='str'),
|
||||
rename=dict(type='str'),
|
||||
truncate=dict(type='bool', default=False),
|
||||
columns=dict(type='list', elements='str'),
|
||||
storage_params=dict(type='list', elements='str'),
|
||||
session_role=dict(type='str'),
|
||||
cascade=dict(type='bool', default=False),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
table = module.params["table"]
|
||||
state = module.params["state"]
|
||||
tablespace = module.params["tablespace"]
|
||||
owner = module.params["owner"]
|
||||
unlogged = module.params["unlogged"]
|
||||
like = module.params["like"]
|
||||
including = module.params["including"]
|
||||
newname = module.params["rename"]
|
||||
storage_params = module.params["storage_params"]
|
||||
truncate = module.params["truncate"]
|
||||
columns = module.params["columns"]
|
||||
cascade = module.params["cascade"]
|
||||
|
||||
if state == 'present' and cascade:
|
||||
module.warn("cascade=true is ignored when state=present")
|
||||
|
||||
# Check mutual exclusive parameters:
|
||||
if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
|
||||
module.fail_json(msg="%s: state=absent is mutually exclusive with: "
|
||||
"truncate, rename, columns, tablespace, "
|
||||
"including, like, storage_params, unlogged, owner" % table)
|
||||
|
||||
if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
|
||||
module.fail_json(msg="%s: truncate is mutually exclusive with: "
|
||||
"rename, columns, like, unlogged, including, "
|
||||
"storage_params, owner, tablespace" % table)
|
||||
|
||||
if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
|
||||
module.fail_json(msg="%s: rename is mutually exclusive with: "
|
||||
"columns, like, unlogged, including, "
|
||||
"storage_params, owner, tablespace" % table)
|
||||
|
||||
if like and columns:
|
||||
module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
|
||||
if including and not like:
|
||||
module.fail_json(msg="%s: including param needs like param specified" % table)
|
||||
|
||||
conn_params = get_conn_params(module, module.params)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=False)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
if storage_params:
|
||||
storage_params = ','.join(storage_params)
|
||||
|
||||
if columns:
|
||||
columns = ','.join(columns)
|
||||
|
||||
##############
|
||||
# Do main job:
|
||||
table_obj = Table(table, module, cursor)
|
||||
|
||||
# Set default returned values:
|
||||
changed = False
|
||||
kw = {}
|
||||
kw['table'] = table
|
||||
kw['state'] = ''
|
||||
if table_obj.exists:
|
||||
kw = dict(
|
||||
table=table,
|
||||
state='present',
|
||||
owner=table_obj.info['owner'],
|
||||
tablespace=table_obj.info['tblspace'],
|
||||
storage_params=table_obj.info['storage_params'],
|
||||
)
|
||||
|
||||
if state == 'absent':
|
||||
changed = table_obj.drop(cascade=cascade)
|
||||
|
||||
elif truncate:
|
||||
changed = table_obj.truncate()
|
||||
|
||||
elif newname:
|
||||
changed = table_obj.rename(newname)
|
||||
q = table_obj.executed_queries
|
||||
table_obj = Table(newname, module, cursor)
|
||||
table_obj.executed_queries = q
|
||||
|
||||
elif state == 'present' and not like:
|
||||
changed = table_obj.create(columns, storage_params,
|
||||
tablespace, unlogged, owner)
|
||||
|
||||
elif state == 'present' and like:
|
||||
changed = table_obj.create_like(like, including, tablespace,
|
||||
unlogged, storage_params)
|
||||
|
||||
if changed:
|
||||
if module.check_mode:
|
||||
db_connection.rollback()
|
||||
else:
|
||||
db_connection.commit()
|
||||
|
||||
# Refresh table info for RETURN.
|
||||
# Note that if the table has been renamed, its info is fetched by the new name:
|
||||
table_obj.get_info()
|
||||
db_connection.commit()
|
||||
if table_obj.exists:
|
||||
kw = dict(
|
||||
table=table,
|
||||
state='present',
|
||||
owner=table_obj.info['owner'],
|
||||
tablespace=table_obj.info['tblspace'],
|
||||
storage_params=table_obj.info['storage_params'],
|
||||
)
|
||||
else:
|
||||
# We just change the table state here
|
||||
# to keep other information about the dropped table:
|
||||
kw['state'] = 'absent'
|
||||
|
||||
kw['queries'] = table_obj.executed_queries
|
||||
kw['changed'] = changed
|
||||
db_connection.close()
|
||||
module.exit_json(**kw)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,927 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['stableinterface'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_user
|
||||
short_description: Add or remove a user (role) from a PostgreSQL server instance
|
||||
description:
|
||||
- Adds or removes a user (role) from a PostgreSQL server instance
|
||||
("cluster" in PostgreSQL terminology) and, optionally,
|
||||
grants the user access to an existing database or tables.
|
||||
- A user is a role with login privilege.
|
||||
- The fundamental function of the module is to create, or delete, users from
|
||||
a PostgreSQL instance. Privilege assignment, or removal, is an optional
|
||||
step, which works on one database at a time. This allows for the module to
|
||||
be called several times in the same playbook to modify the permissions on
|
||||
different databases, or to grant permissions to already existing users.
|
||||
- A user cannot be removed until all the privileges have been stripped from
|
||||
the user. In such a situation, if the module tries to remove the user it
|
||||
will fail. To prevent this, the fail_on_user option signals
|
||||
the module to try to remove the user, but if not possible keep going; the
|
||||
module will report if changes happened and separately if the user was
|
||||
removed or not.
|
||||
version_added: '0.6'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the user (role) to add or remove.
|
||||
type: str
|
||||
required: true
|
||||
aliases:
|
||||
- user
|
||||
password:
|
||||
description:
|
||||
- Set the user's password; before Ansible 1.4 this was required.
|
||||
- Password can be passed unhashed or hashed (MD5-hashed).
|
||||
- Unhashed password will automatically be hashed when saved into the
|
||||
database if the C(encrypted) parameter is set, otherwise it will be saved in
|
||||
plain text format.
|
||||
- When passing a hashed password it must be generated with the format
|
||||
C('str["md5"] + md5[ password + username ]'), resulting in a total of
|
||||
35 characters. An easy way to do this is C(echo "md5$(echo -n
|
||||
'verysecretpasswordJOE' | md5sum | awk '{print $1}')").
|
||||
- Note that if the provided password string is already in MD5-hashed
|
||||
format, then it is used as-is, regardless of C(encrypted) parameter.
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect to and where user's permissions will be granted.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
fail_on_user:
|
||||
description:
|
||||
- If C(yes), fail when user (role) can't be removed. Otherwise just log and continue.
|
||||
default: 'yes'
|
||||
type: bool
|
||||
aliases:
|
||||
- fail_on_role
|
||||
priv:
|
||||
description:
|
||||
- "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
|
||||
privileges can be defined for database ( allowed options - 'CREATE',
|
||||
'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
|
||||
for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
|
||||
'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
|
||||
C(table:SELECT) ). Mixed example of this string:
|
||||
C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
|
||||
type: str
|
||||
role_attr_flags:
|
||||
description:
|
||||
- "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
|
||||
- Note that '[NO]CREATEUSER' is deprecated.
|
||||
- To create a simple role to be used like a group, use the C(NOLOGIN) flag.
|
||||
type: str
|
||||
choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
|
||||
'[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
|
||||
session_role:
|
||||
version_added: '2.8'
|
||||
description:
|
||||
- Switch to session_role after connecting.
|
||||
- The specified session_role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The user (role) state.
|
||||
type: str
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
encrypted:
|
||||
description:
|
||||
- Whether the password is stored hashed in the database.
|
||||
- Passwords can be passed already hashed or unhashed, and PostgreSQL
|
||||
ensures the stored password is hashed when C(encrypted) is set.
|
||||
- "Note: Postgresql 10 and newer doesn't support unhashed passwords."
|
||||
- Previous to Ansible 2.6, this was C(no) by default.
|
||||
default: 'yes'
|
||||
type: bool
|
||||
version_added: '1.4'
|
||||
expires:
|
||||
description:
|
||||
- The date at which the user's password is to expire.
|
||||
- If set to C('infinity'), the user's password never expires.
|
||||
- Note that this value should be a valid SQL date and time type.
|
||||
type: str
|
||||
version_added: '1.4'
|
||||
no_password_changes:
|
||||
description:
|
||||
- If C(yes), don't inspect database for password changes. Effective when
|
||||
C(pg_authid) is not accessible (such as AWS RDS). Otherwise, make
|
||||
password changes as necessary.
|
||||
default: 'no'
|
||||
type: bool
|
||||
version_added: '2.0'
|
||||
conn_limit:
|
||||
description:
|
||||
- Specifies the user (role) connection limit.
|
||||
type: int
|
||||
version_added: '2.4'
|
||||
ssl_mode:
|
||||
description:
|
||||
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
|
||||
- See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes.
|
||||
- Default of C(prefer) matches libpq default.
|
||||
type: str
|
||||
default: prefer
|
||||
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
|
||||
version_added: '2.3'
|
||||
ca_cert:
|
||||
description:
|
||||
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
|
||||
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
|
||||
type: str
|
||||
aliases: [ ssl_rootcert ]
|
||||
version_added: '2.3'
|
||||
groups:
|
||||
description:
|
||||
- The list of groups (roles) that need to be granted to the user.
|
||||
type: list
|
||||
elements: str
|
||||
version_added: '2.9'
|
||||
comment:
|
||||
description:
|
||||
- Add a comment on the user (equal to the COMMENT ON ROLE statement result).
|
||||
type: str
|
||||
version_added: '2.10'
|
||||
notes:
|
||||
- The module creates a user (role) with login privilege by default.
|
||||
Use NOLOGIN role_attr_flags to change this behaviour.
|
||||
- If you specify PUBLIC as the user (role), then the privilege changes will apply to all users (roles).
|
||||
You may not specify password or role_attr_flags when the PUBLIC user is specified.
|
||||
seealso:
|
||||
- module: postgresql_privs
|
||||
- module: postgresql_membership
|
||||
- module: postgresql_owner
|
||||
- name: PostgreSQL database roles
|
||||
description: Complete reference of the PostgreSQL database roles documentation.
|
||||
link: https://www.postgresql.org/docs/current/user-manag.html
|
||||
author:
|
||||
- Ansible Core Team
|
||||
extends_documentation_fragment: postgres
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Connect to acme database, create django user, and grant access to database and products table
|
||||
postgresql_user:
|
||||
db: acme
|
||||
name: django
|
||||
password: ceec4eif7ya
|
||||
priv: "CONNECT/products:ALL"
|
||||
expires: "Jan 31 2020"
|
||||
|
||||
- name: Add a comment on django user
|
||||
postgresql_user:
|
||||
db: acme
|
||||
name: django
|
||||
comment: This is a test user
|
||||
|
||||
# Connect to default database, create rails user, set its password (MD5-hashed),
|
||||
# and grant privilege to create other databases and demote rails from super user status if user exists
|
||||
- name: Create rails user, set MD5-hashed password, grant privs
|
||||
postgresql_user:
|
||||
name: rails
|
||||
password: md59543f1d82624df2b31672ec0f7050460
|
||||
role_attr_flags: CREATEDB,NOSUPERUSER
|
||||
|
||||
- name: Connect to acme database and remove test user privileges from there
|
||||
postgresql_user:
|
||||
db: acme
|
||||
name: test
|
||||
priv: "ALL/products:ALL"
|
||||
state: absent
|
||||
fail_on_user: no
|
||||
|
||||
- name: Connect to test database, remove test user from cluster
|
||||
postgresql_user:
|
||||
db: test
|
||||
name: test
|
||||
priv: ALL
|
||||
state: absent
|
||||
|
||||
- name: Connect to acme database and set user's password with no expire date
|
||||
postgresql_user:
|
||||
db: acme
|
||||
name: django
|
||||
password: mysupersecretword
|
||||
priv: "CONNECT/products:ALL"
|
||||
expires: infinity
|
||||
|
||||
# Example privileges string format
|
||||
# INSERT,UPDATE/table:SELECT/anothertable:ALL
|
||||
|
||||
- name: Connect to test database and remove an existing user's password
|
||||
postgresql_user:
|
||||
db: test
|
||||
user: test
|
||||
password: ""
|
||||
|
||||
- name: Create user test and grant group user_ro and user_rw to it
|
||||
postgresql_user:
|
||||
name: test
|
||||
groups:
|
||||
- user_ro
|
||||
- user_rw
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
queries:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: list
|
||||
sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
|
||||
version_added: '2.8'
|
||||
'''
|
||||
|
||||
import itertools
|
||||
import re
|
||||
import traceback
|
||||
from hashlib import md5
|
||||
|
||||
try:
|
||||
import psycopg2
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.database import pg_quote_identifier, SQLParseError
|
||||
from ansible.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
get_conn_params,
|
||||
PgMembership,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
from ansible.module_utils._text import to_bytes, to_native
|
||||
from ansible.module_utils.six import iteritems
|
||||
|
||||
|
||||
FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
|
||||
FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
|
||||
|
||||
VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
|
||||
database=frozenset(
|
||||
('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
|
||||
)
|
||||
|
||||
# map to cope with idiosyncrasies of SUPERUSER and LOGIN
|
||||
PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
|
||||
CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
|
||||
REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
|
||||
|
||||
executed_queries = []
|
||||
|
||||
|
||||
class InvalidFlagsError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidPrivsError(Exception):
|
||||
pass
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
|
||||
def user_exists(cursor, user):
|
||||
# The PUBLIC user is a special case that is always there
|
||||
if user == 'PUBLIC':
|
||||
return True
|
||||
query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
|
||||
cursor.execute(query, {'user': user})
|
||||
return cursor.rowcount > 0
|
||||
|
||||
|
||||
def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
|
||||
"""Create a new database user (role)."""
|
||||
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
|
||||
# literal
|
||||
query_password_data = dict(password=password, expires=expires)
|
||||
query = ['CREATE USER "%(user)s"' %
|
||||
{"user": user}]
|
||||
if password is not None and password != '':
|
||||
query.append("WITH %(crypt)s" % {"crypt": encrypted})
|
||||
query.append("PASSWORD %(password)s")
|
||||
if expires is not None:
|
||||
query.append("VALID UNTIL %(expires)s")
|
||||
if conn_limit is not None:
|
||||
query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
|
||||
query.append(role_attr_flags)
|
||||
query = ' '.join(query)
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query, query_password_data)
|
||||
return True
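# Illustrative example (assumption, not part of the original module): for
# user='django' with a password, an expiry date and conn_limit=10 the
# assembled query looks like
#   CREATE USER "django" WITH ENCRYPTED PASSWORD %(password)s
#   VALID UNTIL %(expires)s CONNECTION LIMIT 10 <role_attr_flags>
# where the password and expiry are bound as query parameters.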
|
||||
|
||||
|
||||
def user_should_we_change_password(current_role_attrs, user, password, encrypted):
|
||||
"""Check if we should change the user's password.
|
||||
|
||||
Compare the proposed password with the existing one, comparing
|
||||
hashes if encrypted. If we can't access it assume yes.
|
||||
"""
|
||||
|
||||
if current_role_attrs is None:
|
||||
# on some databases, e.g. AWS RDS instances, there is no access to
|
||||
# the pg_authid relation to check the pre-existing password, so we
|
||||
# just assume password is different
|
||||
return True
|
||||
|
||||
# Do we actually need to do anything?
|
||||
pwchanging = False
|
||||
if password is not None:
|
||||
# Empty password means that the role shouldn't have a password, which
|
||||
# means we need to check if the current password is None.
|
||||
if password == '':
|
||||
if current_role_attrs['rolpassword'] is not None:
|
||||
pwchanging = True
|
||||
# 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
|
||||
# 3: The size of the 'md5' prefix
|
||||
# When the provided password looks like a MD5-hash, value of
|
||||
# 'encrypted' is ignored.
|
||||
elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
|
||||
if password != current_role_attrs['rolpassword']:
|
||||
pwchanging = True
|
||||
elif encrypted == 'ENCRYPTED':
|
||||
hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
|
||||
if hashed_password != current_role_attrs['rolpassword']:
|
||||
pwchanging = True
|
||||
|
||||
return pwchanging
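# Illustrative note (assumption, not part of the original module): for an
# encrypted comparison the candidate hash is built as
#   'md5' + md5(password + username).hexdigest()
# e.g. for user 'joe' and password 'secret' the md5 digest of 'secretjoe'
# prefixed with 'md5' is compared with pg_authid.rolpassword.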
|
||||
|
||||
|
||||
def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
|
||||
"""Change user password and/or attributes. Return True if changed, False otherwise."""
|
||||
changed = False
|
||||
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
|
||||
# literal
|
||||
if user == 'PUBLIC':
|
||||
if password is not None:
|
||||
module.fail_json(msg="cannot change the password for PUBLIC user")
|
||||
elif role_attr_flags != '':
|
||||
module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
|
||||
else:
|
||||
return False
|
||||
|
||||
# Handle passwords.
|
||||
if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
|
||||
# Select password and all flag-like columns in order to verify changes.
|
||||
try:
|
||||
select = "SELECT * FROM pg_authid where rolname=%(user)s"
|
||||
cursor.execute(select, {"user": user})
|
||||
# Grab current role attributes.
|
||||
current_role_attrs = cursor.fetchone()
|
||||
except psycopg2.ProgrammingError:
|
||||
current_role_attrs = None
|
||||
db_connection.rollback()
|
||||
|
||||
pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)
|
||||
|
||||
if current_role_attrs is None:
|
||||
try:
|
||||
                # AWS RDS instances do not allow the user to access pg_authid
|
||||
# so try to get current_role_attrs from pg_roles tables
|
||||
select = "SELECT * FROM pg_roles where rolname=%(user)s"
|
||||
cursor.execute(select, {"user": user})
|
||||
# Grab current role attributes from pg_roles
|
||||
current_role_attrs = cursor.fetchone()
|
||||
except psycopg2.ProgrammingError as e:
|
||||
db_connection.rollback()
|
||||
module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))
|
||||
|
||||
role_attr_flags_changing = False
|
||||
if role_attr_flags:
|
||||
role_attr_flags_dict = {}
|
||||
for r in role_attr_flags.split(' '):
|
||||
if r.startswith('NO'):
|
||||
role_attr_flags_dict[r.replace('NO', '', 1)] = False
|
||||
else:
|
||||
role_attr_flags_dict[r] = True
|
||||
|
||||
for role_attr_name, role_attr_value in role_attr_flags_dict.items():
|
||||
if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
|
||||
role_attr_flags_changing = True
|
||||
|
||||
if expires is not None:
|
||||
cursor.execute("SELECT %s::timestamptz;", (expires,))
|
||||
expires_with_tz = cursor.fetchone()[0]
|
||||
expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
|
||||
else:
|
||||
expires_changing = False
|
||||
|
||||
conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])
|
||||
|
||||
if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
|
||||
return False
|
||||
|
||||
alter = ['ALTER USER "%(user)s"' % {"user": user}]
|
||||
if pwchanging:
|
||||
if password != '':
|
||||
alter.append("WITH %(crypt)s" % {"crypt": encrypted})
|
||||
alter.append("PASSWORD %(password)s")
|
||||
else:
|
||||
alter.append("WITH PASSWORD NULL")
|
||||
alter.append(role_attr_flags)
|
||||
elif role_attr_flags:
|
||||
alter.append('WITH %s' % role_attr_flags)
|
||||
if expires is not None:
|
||||
alter.append("VALID UNTIL %(expires)s")
|
||||
if conn_limit is not None:
|
||||
alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
|
||||
|
||||
query_password_data = dict(password=password, expires=expires)
|
||||
try:
|
||||
cursor.execute(' '.join(alter), query_password_data)
|
||||
changed = True
|
||||
except psycopg2.InternalError as e:
|
||||
if e.pgcode == '25006':
|
||||
# Handle errors due to read-only transactions indicated by pgcode 25006
|
||||
# ERROR: cannot execute ALTER ROLE in a read-only transaction
|
||||
changed = False
|
||||
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
|
||||
return changed
|
||||
else:
|
||||
raise psycopg2.InternalError(e)
|
||||
except psycopg2.NotSupportedError as e:
|
||||
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
|
||||
|
||||
elif no_password_changes and role_attr_flags != '':
|
||||
# Grab role information from pg_roles instead of pg_authid
|
||||
select = "SELECT * FROM pg_roles where rolname=%(user)s"
|
||||
cursor.execute(select, {"user": user})
|
||||
# Grab current role attributes.
|
||||
current_role_attrs = cursor.fetchone()
|
||||
|
||||
role_attr_flags_changing = False
|
||||
|
||||
if role_attr_flags:
|
||||
role_attr_flags_dict = {}
|
||||
for r in role_attr_flags.split(' '):
|
||||
if r.startswith('NO'):
|
||||
role_attr_flags_dict[r.replace('NO', '', 1)] = False
|
||||
else:
|
||||
role_attr_flags_dict[r] = True
|
||||
|
||||
for role_attr_name, role_attr_value in role_attr_flags_dict.items():
|
||||
if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
|
||||
role_attr_flags_changing = True
|
||||
|
||||
if not role_attr_flags_changing:
|
||||
return False
|
||||
|
||||
alter = ['ALTER USER "%(user)s"' %
|
||||
{"user": user}]
|
||||
if role_attr_flags:
|
||||
alter.append('WITH %s' % role_attr_flags)
|
||||
|
||||
try:
|
||||
cursor.execute(' '.join(alter))
|
||||
except psycopg2.InternalError as e:
|
||||
if e.pgcode == '25006':
|
||||
# Handle errors due to read-only transactions indicated by pgcode 25006
|
||||
# ERROR: cannot execute ALTER ROLE in a read-only transaction
|
||||
changed = False
|
||||
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
|
||||
return changed
|
||||
else:
|
||||
raise psycopg2.InternalError(e)
|
||||
|
||||
# Grab new role attributes.
|
||||
cursor.execute(select, {"user": user})
|
||||
new_role_attrs = cursor.fetchone()
|
||||
|
||||
# Detect any differences between current_ and new_role_attrs.
|
||||
changed = current_role_attrs != new_role_attrs
|
||||
|
||||
return changed
|
||||
|
||||
|
||||
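# Illustrative sketch, not part of the original module: the kind of statement
# user_alter() above assembles when, for a hypothetical role 'app_user', only
# the connection limit changes:
#
#     ALTER USER "app_user" CONNECTION LIMIT 20
#
# Password and expiry values are not interpolated into the string; they are
# passed separately via query_password_data so that psycopg2 quotes them.

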
def user_delete(cursor, user):
    """Try to remove a user. Returns True if successful otherwise False"""
    cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
    try:
        query = 'DROP USER "%s"' % user
        executed_queries.append(query)
        cursor.execute(query)
    except Exception:
        cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
        cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
        return False

    cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
    return True


def has_table_privileges(cursor, user, table, privs):
    """
    Return the difference between the privileges that a user already has and
    the privileges that they desire to have.

    :returns: tuple of:
        * privileges that they have and were requested
        * privileges they currently hold but were not requested
        * privileges requested that they do not hold
    """
    cur_privs = get_table_privileges(cursor, user, table)
    have_currently = cur_privs.intersection(privs)
    other_current = cur_privs.difference(privs)
    desired = privs.difference(cur_privs)
    return (have_currently, other_current, desired)


def get_table_privileges(cursor, user, table):
    if '.' in table:
        schema, table = table.split('.', 1)
    else:
        schema = 'public'
    query = ("SELECT privilege_type FROM information_schema.role_table_grants "
             "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s")
    cursor.execute(query, {'user': user, 'table': table, 'schema': schema})
    return frozenset([x[0] for x in cursor.fetchall()])

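# Illustrative sketch, not part of the original module: how the three-way
# split returned by has_table_privileges() is typically consumed (user and
# table names are hypothetical).
#
#     have, other, missing = has_table_privileges(
#         cursor, 'app_user', 'public.orders', frozenset(['SELECT', 'INSERT']))
#     # have    -> requested privileges already granted (nothing to do)
#     # other   -> granted but not requested (candidates for REVOKE)
#     # missing -> requested but not yet granted (candidates for GRANT)

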
def grant_table_privileges(cursor, user, table, privs):
|
||||
# Note: priv escaped by parse_privs
|
||||
privs = ', '.join(privs)
|
||||
query = 'GRANT %s ON TABLE %s TO "%s"' % (
|
||||
privs, pg_quote_identifier(table, 'table'), user)
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query)
|
||||
|
||||
|
||||
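# Illustrative sketch, not part of the original module: the statement
# grant_table_privileges() above builds for a hypothetical user and
# schema-qualified table (quoting is done by pg_quote_identifier):
#
#     GRANT SELECT, INSERT ON TABLE "public"."orders" TO "app_user"

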
def revoke_table_privileges(cursor, user, table, privs):
|
||||
# Note: priv escaped by parse_privs
|
||||
privs = ', '.join(privs)
|
||||
query = 'REVOKE %s ON TABLE %s FROM "%s"' % (
|
||||
privs, pg_quote_identifier(table, 'table'), user)
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query)
|
||||
|
||||
|
||||
def get_database_privileges(cursor, user, db):
|
||||
priv_map = {
|
||||
'C': 'CREATE',
|
||||
'T': 'TEMPORARY',
|
||||
'c': 'CONNECT',
|
||||
}
|
||||
query = 'SELECT datacl FROM pg_database WHERE datname = %s'
|
||||
cursor.execute(query, (db,))
|
||||
datacl = cursor.fetchone()[0]
|
||||
if datacl is None:
|
||||
return set()
|
||||
r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
|
||||
if r is None:
|
||||
return set()
|
||||
o = set()
|
||||
for v in r.group(1):
|
||||
o.add(priv_map[v])
|
||||
return normalize_privileges(o, 'database')
|
||||
|
||||
|
||||
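# Illustrative sketch, not part of the original module: the aclitem text that
# get_database_privileges() parses. For a hypothetical pg_database.datacl entry
# containing 'app_user=CTc/postgres', the regex captures 'CTc', which maps via
# priv_map to {'CREATE', 'TEMPORARY', 'CONNECT'} before normalisation.

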
def has_database_privileges(cursor, user, db, privs):
|
||||
"""
|
||||
Return the difference between the privileges that a user already has and
|
||||
the privileges that they desire to have.
|
||||
|
||||
:returns: tuple of:
|
||||
* privileges that they have and were requested
|
||||
* privileges they currently hold but were not requested
|
||||
* privileges requested that they do not hold
|
||||
"""
|
||||
cur_privs = get_database_privileges(cursor, user, db)
|
||||
have_currently = cur_privs.intersection(privs)
|
||||
other_current = cur_privs.difference(privs)
|
||||
desired = privs.difference(cur_privs)
|
||||
return (have_currently, other_current, desired)
|
||||
|
||||
|
||||
def grant_database_privileges(cursor, user, db, privs):
|
||||
# Note: priv escaped by parse_privs
|
||||
privs = ', '.join(privs)
|
||||
if user == "PUBLIC":
|
||||
query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
|
||||
privs, pg_quote_identifier(db, 'database'))
|
||||
else:
|
||||
query = 'GRANT %s ON DATABASE %s TO "%s"' % (
|
||||
privs, pg_quote_identifier(db, 'database'), user)
|
||||
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query)
|
||||
|
||||
|
||||
def revoke_database_privileges(cursor, user, db, privs):
|
||||
# Note: priv escaped by parse_privs
|
||||
privs = ', '.join(privs)
|
||||
if user == "PUBLIC":
|
||||
query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
|
||||
privs, pg_quote_identifier(db, 'database'))
|
||||
else:
|
||||
query = 'REVOKE %s ON DATABASE %s FROM "%s"' % (
|
||||
privs, pg_quote_identifier(db, 'database'), user)
|
||||
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query)
|
||||
|
||||
|
||||
def revoke_privileges(cursor, user, privs):
|
||||
if privs is None:
|
||||
return False
|
||||
|
||||
revoke_funcs = dict(table=revoke_table_privileges,
|
||||
database=revoke_database_privileges)
|
||||
check_funcs = dict(table=has_table_privileges,
|
||||
database=has_database_privileges)
|
||||
|
||||
changed = False
|
||||
for type_ in privs:
|
||||
for name, privileges in iteritems(privs[type_]):
|
||||
# Check that any of the privileges requested to be removed are
|
||||
# currently granted to the user
|
||||
differences = check_funcs[type_](cursor, user, name, privileges)
|
||||
if differences[0]:
|
||||
revoke_funcs[type_](cursor, user, name, privileges)
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
|
||||
def grant_privileges(cursor, user, privs):
|
||||
if privs is None:
|
||||
return False
|
||||
|
||||
grant_funcs = dict(table=grant_table_privileges,
|
||||
database=grant_database_privileges)
|
||||
check_funcs = dict(table=has_table_privileges,
|
||||
database=has_database_privileges)
|
||||
|
||||
changed = False
|
||||
for type_ in privs:
|
||||
for name, privileges in iteritems(privs[type_]):
|
||||
# Check that any of the privileges requested for the user are
|
||||
# currently missing
|
||||
differences = check_funcs[type_](cursor, user, name, privileges)
|
||||
if differences[2]:
|
||||
grant_funcs[type_](cursor, user, name, privileges)
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
|
||||
def parse_role_attrs(cursor, role_attr_flags):
    """
    Parse role attributes string for user creation.
    Format:

        attributes[,attributes,...]

    Where:

        attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
        [ "[NO]SUPERUSER", "[NO]CREATEROLE", "[NO]CREATEDB",
          "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION",
          "[NO]BYPASSRLS" ]

    Note: the "[NO]BYPASSRLS" role attribute was introduced in PostgreSQL 9.5.
    Note: the "[NO]CREATEUSER" role attribute is deprecated.
    """
    flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role)

    valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor)))
    valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags)))

    if not flags.issubset(valid_flags):
        raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
                                ' '.join(flags.difference(valid_flags)))

    return ' '.join(flags)

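# Illustrative sketch, not part of the original module: the comma-separated
# input parse_role_attrs() accepts and the space-separated SQL fragment it
# returns (flag order is not guaranteed, since the flags live in a frozenset).
#
#     parse_role_attrs(cursor, 'CREATEDB,NOLOGIN,createrole')
#     # -> e.g. 'CREATEDB NOLOGIN CREATEROLE'
#     parse_role_attrs(cursor, 'SUPERDUPERUSER')
#     # -> raises InvalidFlagsError

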
def normalize_privileges(privs, type_):
    new_privs = set(privs)
    if 'ALL' in new_privs:
        new_privs.update(VALID_PRIVS[type_])
        new_privs.remove('ALL')
    if 'TEMP' in new_privs:
        new_privs.add('TEMPORARY')
        new_privs.remove('TEMP')

    return new_privs


def parse_privs(privs, db):
    """
    Parse privilege string to determine permissions for database db.
    Format:

        privileges[/privileges/...]

    Where:

        privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
            TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
    """
    if privs is None:
        return privs

    o_privs = {
        'database': {},
        'table': {}
    }
    for token in privs.split('/'):
        if ':' not in token:
            type_ = 'database'
            name = db
            priv_set = frozenset(x.strip().upper()
                                 for x in token.split(',') if x.strip())
        else:
            type_ = 'table'
            name, privileges = token.split(':', 1)
            priv_set = frozenset(x.strip().upper()
                                 for x in privileges.split(',') if x.strip())

        if not priv_set.issubset(VALID_PRIVS[type_]):
            raise InvalidPrivsError('Invalid privs specified for %s: %s' %
                                    (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))

        priv_set = normalize_privileges(priv_set, type_)
        o_privs[type_][name] = priv_set

    return o_privs

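# Illustrative sketch, not part of the original module: what parse_privs()
# produces for a combined database/table privilege string (database and table
# names are hypothetical).
#
#     parse_privs('CONNECT/orders:SELECT,INSERT', 'shopdb')
#     # -> {'database': {'shopdb': {'CONNECT'}},
#     #     'table':    {'orders': {'SELECT', 'INSERT'}}}

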
def get_valid_flags_by_version(cursor):
    """
    Some role attributes were introduced after certain versions. We want to
    compile a list of valid flags against the current Postgres version.
    """
    current_version = cursor.connection.server_version

    return [
        flag
        for flag, version_introduced in FLAGS_BY_VERSION.items()
        if current_version >= version_introduced
    ]


def get_comment(cursor, user):
    """Get user's comment."""
    query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
             "FROM pg_catalog.pg_roles r "
             "WHERE r.rolname = %(user)s")
    cursor.execute(query, {'user': user})
    return cursor.fetchone()[0]


def add_comment(cursor, user, comment):
    """Add comment on user."""
    if comment != get_comment(cursor, user):
        query = 'COMMENT ON ROLE "%s" IS ' % user
        cursor.execute(query + '%(comment)s', {'comment': comment})
        executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
        return True
    else:
        return False


# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
user=dict(type='str', required=True, aliases=['name']),
|
||||
password=dict(type='str', default=None, no_log=True),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
priv=dict(type='str', default=None),
|
||||
db=dict(type='str', default='', aliases=['login_db']),
|
||||
fail_on_user=dict(type='bool', default='yes', aliases=['fail_on_role']),
|
||||
role_attr_flags=dict(type='str', default=''),
|
||||
encrypted=dict(type='bool', default='yes'),
|
||||
no_password_changes=dict(type='bool', default='no'),
|
||||
expires=dict(type='str', default=None),
|
||||
conn_limit=dict(type='int', default=None),
|
||||
session_role=dict(type='str'),
|
||||
groups=dict(type='list', elements='str'),
|
||||
comment=dict(type='str', default=None),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
user = module.params["user"]
|
||||
password = module.params["password"]
|
||||
state = module.params["state"]
|
||||
fail_on_user = module.params["fail_on_user"]
|
||||
if module.params['db'] == '' and module.params["priv"] is not None:
|
||||
module.fail_json(msg="privileges require a database to be specified")
|
||||
privs = parse_privs(module.params["priv"], module.params["db"])
|
||||
no_password_changes = module.params["no_password_changes"]
|
||||
if module.params["encrypted"]:
|
||||
encrypted = "ENCRYPTED"
|
||||
else:
|
||||
encrypted = "UNENCRYPTED"
|
||||
expires = module.params["expires"]
|
||||
conn_limit = module.params["conn_limit"]
|
||||
role_attr_flags = module.params["role_attr_flags"]
|
||||
groups = module.params["groups"]
|
||||
if groups:
|
||||
groups = [e.strip() for e in groups]
|
||||
comment = module.params["comment"]
|
||||
|
||||
conn_params = get_conn_params(module, module.params, warn_db_default=False)
|
||||
db_connection = connect_to_db(module, conn_params)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
try:
|
||||
role_attr_flags = parse_role_attrs(cursor, role_attr_flags)
|
||||
except InvalidFlagsError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
kw = dict(user=user)
|
||||
changed = False
|
||||
user_removed = False
|
||||
|
||||
if state == "present":
|
||||
if user_exists(cursor, user):
|
||||
try:
|
||||
changed = user_alter(db_connection, module, user, password,
|
||||
role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
else:
|
||||
try:
|
||||
changed = user_add(cursor, user, password,
|
||||
role_attr_flags, encrypted, expires, conn_limit)
|
||||
except psycopg2.ProgrammingError as e:
|
||||
module.fail_json(msg="Unable to add user with given requirement "
|
||||
"due to : %s" % to_native(e),
|
||||
exception=traceback.format_exc())
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
try:
|
||||
changed = grant_privileges(cursor, user, privs) or changed
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
if groups:
|
||||
target_roles = []
|
||||
target_roles.append(user)
|
||||
pg_membership = PgMembership(module, cursor, groups, target_roles)
|
||||
changed = pg_membership.grant() or changed
|
||||
executed_queries.extend(pg_membership.executed_queries)
|
||||
|
||||
if comment is not None:
|
||||
try:
|
||||
changed = add_comment(cursor, user, comment) or changed
|
||||
except Exception as e:
|
||||
module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
else:
|
||||
if user_exists(cursor, user):
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
kw['user_removed'] = True
|
||||
else:
|
||||
try:
|
||||
changed = revoke_privileges(cursor, user, privs)
|
||||
user_removed = user_delete(cursor, user)
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
changed = changed or user_removed
|
||||
if fail_on_user and not user_removed:
|
||||
msg = "Unable to remove user"
|
||||
module.fail_json(msg=msg)
|
||||
kw['user_removed'] = user_removed
|
||||
|
||||
if changed:
|
||||
if module.check_mode:
|
||||
db_connection.rollback()
|
||||
else:
|
||||
db_connection.commit()
|
||||
|
||||
kw['changed'] = changed
|
||||
kw['queries'] = executed_queries
|
||||
module.exit_json(**kw)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,180 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2013, Chatham Financial <oss@chathamfinancial.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rabbitmq_plugin
|
||||
short_description: Manage RabbitMQ plugins
|
||||
description:
|
||||
- This module can be used to enable or disable RabbitMQ plugins.
|
||||
version_added: "1.1"
|
||||
author:
|
||||
- Chris Hoffman (@chrishoffman)
|
||||
options:
|
||||
names:
|
||||
description:
|
||||
      - Comma-separated list of plugin names. A single plugin name is also accepted.
|
||||
required: true
|
||||
aliases: [name]
|
||||
new_only:
|
||||
description:
|
||||
- Only enable missing plugins.
|
||||
- Does not disable plugins that are not in the names list.
|
||||
type: bool
|
||||
default: "no"
|
||||
state:
|
||||
description:
|
||||
- Specify if plugins are to be enabled or disabled.
|
||||
default: enabled
|
||||
choices: [enabled, disabled]
|
||||
prefix:
|
||||
description:
|
||||
      - Specify a custom install prefix of the RabbitMQ installation.
|
||||
version_added: "1.3"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Enables the rabbitmq_management plugin
|
||||
rabbitmq_plugin:
|
||||
names: rabbitmq_management
|
||||
state: enabled
|
||||
|
||||
- name: Enable multiple rabbitmq plugins
|
||||
rabbitmq_plugin:
|
||||
names: rabbitmq_management,rabbitmq_management_visualiser
|
||||
state: enabled
|
||||
|
||||
- name: Disable plugin
|
||||
rabbitmq_plugin:
|
||||
names: rabbitmq_management
|
||||
state: disabled
|
||||
|
||||
- name: Enable every plugin in list with existing plugins
|
||||
rabbitmq_plugin:
|
||||
names: rabbitmq_management,rabbitmq_management_visualiser,rabbitmq_shovel,rabbitmq_shovel_management
|
||||
state: enabled
|
||||
new_only: 'yes'
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
enabled:
|
||||
description: list of plugins enabled during task run
|
||||
returned: always
|
||||
type: list
|
||||
sample: ["rabbitmq_management"]
|
||||
disabled:
|
||||
description: list of plugins disabled during task run
|
||||
returned: always
|
||||
type: list
|
||||
sample: ["rabbitmq_management"]
|
||||
'''
|
||||
|
||||
import os
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
class RabbitMqPlugins(object):
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
bin_path = ''
|
||||
if module.params['prefix']:
|
||||
if os.path.isdir(os.path.join(module.params['prefix'], 'bin')):
|
||||
bin_path = os.path.join(module.params['prefix'], 'bin')
|
||||
elif os.path.isdir(os.path.join(module.params['prefix'], 'sbin')):
|
||||
bin_path = os.path.join(module.params['prefix'], 'sbin')
|
||||
else:
|
||||
# No such path exists.
|
||||
module.fail_json(msg="No binary folder in prefix %s" % module.params['prefix'])
|
||||
|
||||
self._rabbitmq_plugins = os.path.join(bin_path, "rabbitmq-plugins")
|
||||
else:
|
||||
self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True)
|
||||
|
||||
def _exec(self, args, run_in_check_mode=False):
|
||||
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
|
||||
cmd = [self._rabbitmq_plugins]
|
||||
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
|
||||
return out.splitlines()
|
||||
return list()
|
||||
|
||||
def get_all(self):
|
||||
list_output = self._exec(['list', '-E', '-m'], True)
|
||||
plugins = []
|
||||
for plugin in list_output:
|
||||
if not plugin:
|
||||
break
|
||||
plugins.append(plugin)
|
||||
|
||||
return plugins
|
||||
|
||||
def enable(self, name):
|
||||
self._exec(['enable', name])
|
||||
|
||||
def disable(self, name):
|
||||
self._exec(['disable', name])
|
||||
|
||||
|
||||
def main():
|
||||
arg_spec = dict(
|
||||
names=dict(required=True, aliases=['name']),
|
||||
new_only=dict(default='no', type='bool'),
|
||||
state=dict(default='enabled', choices=['enabled', 'disabled']),
|
||||
prefix=dict(required=False, default=None)
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=arg_spec,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
result = dict()
|
||||
names = module.params['names'].split(',')
|
||||
new_only = module.params['new_only']
|
||||
state = module.params['state']
|
||||
|
||||
rabbitmq_plugins = RabbitMqPlugins(module)
|
||||
enabled_plugins = rabbitmq_plugins.get_all()
|
||||
|
||||
enabled = []
|
||||
disabled = []
|
||||
if state == 'enabled':
|
||||
if not new_only:
|
||||
for plugin in enabled_plugins:
|
||||
if " " in plugin:
|
||||
continue
|
||||
if plugin not in names:
|
||||
rabbitmq_plugins.disable(plugin)
|
||||
disabled.append(plugin)
|
||||
|
||||
for name in names:
|
||||
if name not in enabled_plugins:
|
||||
rabbitmq_plugins.enable(name)
|
||||
enabled.append(name)
|
||||
else:
|
||||
for plugin in enabled_plugins:
|
||||
if plugin in names:
|
||||
rabbitmq_plugins.disable(plugin)
|
||||
disabled.append(plugin)
|
||||
|
||||
result['changed'] = len(enabled) > 0 or len(disabled) > 0
|
||||
result['enabled'] = enabled
|
||||
result['disabled'] = disabled
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,257 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rabbitmq_queue
|
||||
author: Manuel Sousa (@manuel-sousa)
|
||||
version_added: "2.0"
|
||||
|
||||
short_description: Manage RabbitMQ queues
|
||||
description:
|
||||
  - This module uses the RabbitMQ REST API to create/delete queues
|
||||
requirements: [ "requests >= 1.0.0" ]
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the queue
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Whether the queue should be present or absent
|
||||
choices: [ "present", "absent" ]
|
||||
default: present
|
||||
durable:
|
||||
description:
|
||||
- whether queue is durable or not
|
||||
type: bool
|
||||
default: 'yes'
|
||||
auto_delete:
|
||||
description:
|
||||
            - If the queue should delete itself after all consumers/queues have unbound from it
|
||||
type: bool
|
||||
default: 'no'
|
||||
message_ttl:
|
||||
description:
|
||||
- How long a message can live in queue before it is discarded (milliseconds)
|
||||
default: forever
|
||||
auto_expires:
|
||||
description:
|
||||
- How long a queue can be unused before it is automatically deleted (milliseconds)
|
||||
default: forever
|
||||
max_length:
|
||||
description:
|
||||
- How many messages can the queue contain before it starts rejecting
|
||||
default: no limit
|
||||
dead_letter_exchange:
|
||||
description:
|
||||
- Optional name of an exchange to which messages will be republished if they
|
||||
- are rejected or expire
|
||||
dead_letter_routing_key:
|
||||
description:
|
||||
- Optional replacement routing key to use when a message is dead-lettered.
|
||||
- Original routing key will be used if unset
|
||||
max_priority:
|
||||
description:
|
||||
- Maximum number of priority levels for the queue to support.
|
||||
- If not set, the queue will not support message priorities.
|
||||
- Larger numbers indicate higher priority.
|
||||
version_added: "2.4"
|
||||
arguments:
|
||||
description:
|
||||
            - Extra arguments for the queue. If defined, this argument is a key/value dictionary
|
||||
default: {}
|
||||
extends_documentation_fragment:
|
||||
- rabbitmq
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create a queue
|
||||
- rabbitmq_queue:
|
||||
name: myQueue
|
||||
|
||||
# Create a queue on remote host
|
||||
- rabbitmq_queue:
|
||||
name: myRemoteQueue
|
||||
login_user: user
|
||||
login_password: secret
|
||||
login_host: remote.example.org
|
||||
'''
|
||||
|
||||
import json
|
||||
import traceback
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
import requests
|
||||
HAS_REQUESTS = True
|
||||
except ImportError:
|
||||
REQUESTS_IMP_ERR = traceback.format_exc()
|
||||
HAS_REQUESTS = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.six.moves.urllib import parse as urllib_parse
|
||||
from ansible.module_utils.rabbitmq import rabbitmq_argument_spec
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
argument_spec = rabbitmq_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
state=dict(default='present', choices=['present', 'absent'], type='str'),
|
||||
name=dict(required=True, type='str'),
|
||||
durable=dict(default=True, type='bool'),
|
||||
auto_delete=dict(default=False, type='bool'),
|
||||
message_ttl=dict(default=None, type='int'),
|
||||
auto_expires=dict(default=None, type='int'),
|
||||
max_length=dict(default=None, type='int'),
|
||||
dead_letter_exchange=dict(default=None, type='str'),
|
||||
dead_letter_routing_key=dict(default=None, type='str'),
|
||||
arguments=dict(default=dict(), type='dict'),
|
||||
max_priority=dict(default=None, type='int')
|
||||
)
|
||||
)
|
||||
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
|
||||
|
||||
url = "%s://%s:%s/api/queues/%s/%s" % (
|
||||
module.params['login_protocol'],
|
||||
module.params['login_host'],
|
||||
module.params['login_port'],
|
||||
urllib_parse.quote(module.params['vhost'], ''),
|
||||
module.params['name']
|
||||
)
|
||||
|
||||
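    # Illustrative sketch, not part of the original module: with the default
    # management listener and a hypothetical vhost '/shop', the URL built above
    # becomes
    #
    #     http://localhost:15672/api/queues/%2Fshop/myQueue
    #
    # (the vhost is URL-encoded by urllib_parse.quote).
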
if not HAS_REQUESTS:
|
||||
module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR)
|
||||
|
||||
result = dict(changed=False, name=module.params['name'])
|
||||
|
||||
# Check if queue already exists
|
||||
r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']),
|
||||
verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key']))
|
||||
|
||||
if r.status_code == 200:
|
||||
queue_exists = True
|
||||
response = r.json()
|
||||
elif r.status_code == 404:
|
||||
queue_exists = False
|
||||
response = r.text
|
||||
else:
|
||||
module.fail_json(
|
||||
msg="Invalid response from RESTAPI when trying to check if queue exists",
|
||||
details=r.text
|
||||
)
|
||||
|
||||
if module.params['state'] == 'present':
|
||||
change_required = not queue_exists
|
||||
else:
|
||||
change_required = queue_exists
|
||||
|
||||
# Check if attributes change on existing queue
|
||||
if not change_required and r.status_code == 200 and module.params['state'] == 'present':
|
||||
if not (
|
||||
response['durable'] == module.params['durable'] and
|
||||
response['auto_delete'] == module.params['auto_delete'] and
|
||||
(
|
||||
('x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl']) or
|
||||
('x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None)
|
||||
) and
|
||||
(
|
||||
('x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires']) or
|
||||
('x-expires' not in response['arguments'] and module.params['auto_expires'] is None)
|
||||
) and
|
||||
(
|
||||
('x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length']) or
|
||||
('x-max-length' not in response['arguments'] and module.params['max_length'] is None)
|
||||
) and
|
||||
(
|
||||
('x-dead-letter-exchange' in response['arguments'] and
|
||||
response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or
|
||||
('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None)
|
||||
) and
|
||||
(
|
||||
('x-dead-letter-routing-key' in response['arguments'] and
|
||||
response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or
|
||||
('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None)
|
||||
) and
|
||||
(
|
||||
('x-max-priority' in response['arguments'] and
|
||||
response['arguments']['x-max-priority'] == module.params['max_priority']) or
|
||||
('x-max-priority' not in response['arguments'] and module.params['max_priority'] is None)
|
||||
)
|
||||
):
|
||||
module.fail_json(
|
||||
msg="RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
|
||||
)
|
||||
|
||||
# Copy parameters to arguments as used by RabbitMQ
|
||||
for k, v in {
|
||||
'message_ttl': 'x-message-ttl',
|
||||
'auto_expires': 'x-expires',
|
||||
'max_length': 'x-max-length',
|
||||
'dead_letter_exchange': 'x-dead-letter-exchange',
|
||||
'dead_letter_routing_key': 'x-dead-letter-routing-key',
|
||||
'max_priority': 'x-max-priority'
|
||||
}.items():
|
||||
if module.params[k] is not None:
|
||||
module.params['arguments'][v] = module.params[k]
|
||||
|
||||
# Exit if check_mode
|
||||
if module.check_mode:
|
||||
result['changed'] = change_required
|
||||
result['details'] = response
|
||||
result['arguments'] = module.params['arguments']
|
||||
module.exit_json(**result)
|
||||
|
||||
# Do changes
|
||||
if change_required:
|
||||
if module.params['state'] == 'present':
|
||||
r = requests.put(
|
||||
url,
|
||||
auth=(module.params['login_user'], module.params['login_password']),
|
||||
headers={"content-type": "application/json"},
|
||||
data=json.dumps({
|
||||
"durable": module.params['durable'],
|
||||
"auto_delete": module.params['auto_delete'],
|
||||
"arguments": module.params['arguments']
|
||||
}),
|
||||
verify=module.params['ca_cert'],
|
||||
cert=(module.params['client_cert'], module.params['client_key'])
|
||||
)
|
||||
elif module.params['state'] == 'absent':
|
||||
r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']),
|
||||
verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key']))
|
||||
|
||||
# RabbitMQ 3.6.7 changed this response code from 204 to 201
|
||||
if r.status_code == 204 or r.status_code == 201:
|
||||
result['changed'] = True
|
||||
module.exit_json(**result)
|
||||
else:
|
||||
module.fail_json(
|
||||
msg="Error creating queue",
|
||||
status=r.status_code,
|
||||
details=r.text
|
||||
)
|
||||
|
||||
else:
|
||||
module.exit_json(
|
||||
changed=False,
|
||||
name=module.params['name']
|
||||
)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,740 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['stableinterface'],
|
||||
'supported_by': 'core'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: s3_bucket
|
||||
short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
|
||||
description:
|
||||
- Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
|
||||
version_added: "2.0"
|
||||
requirements: [ boto3 ]
|
||||
author: "Rob White (@wimnat)"
|
||||
options:
|
||||
force:
|
||||
description:
|
||||
- When trying to delete a bucket, delete all keys (including versions and delete markers)
|
||||
in the bucket first (an s3 bucket must be empty for a successful deletion)
|
||||
type: bool
|
||||
default: 'no'
|
||||
name:
|
||||
description:
|
||||
- Name of the s3 bucket
|
||||
required: true
|
||||
type: str
|
||||
policy:
|
||||
description:
|
||||
- The JSON policy as a string.
|
||||
type: json
|
||||
s3_url:
|
||||
description:
|
||||
- S3 URL endpoint for usage with DigitalOcean, Ceph, Eucalyptus and fakes3 etc.
|
||||
- Assumes AWS if not specified.
|
||||
- For Walrus, use FQDN of the endpoint without scheme nor path.
|
||||
aliases: [ S3_URL ]
|
||||
type: str
|
||||
ceph:
|
||||
description:
|
||||
- Enable API compatibility with Ceph. It takes into account the S3 API subset working
|
||||
with Ceph in order to provide the same module behaviour where possible.
|
||||
type: bool
|
||||
version_added: "2.2"
|
||||
requester_pays:
|
||||
description:
|
||||
- With Requester Pays buckets, the requester instead of the bucket owner pays the cost
|
||||
of the request and the data download from the bucket.
|
||||
type: bool
|
||||
default: False
|
||||
state:
|
||||
description:
|
||||
- Create or remove the s3 bucket
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- tags dict to apply to bucket
|
||||
type: dict
|
||||
purge_tags:
|
||||
description:
|
||||
- whether to remove tags that aren't present in the C(tags) parameter
|
||||
type: bool
|
||||
default: True
|
||||
version_added: "2.9"
|
||||
versioning:
|
||||
description:
|
||||
- Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
|
||||
type: bool
|
||||
encryption:
|
||||
description:
|
||||
- Describes the default server-side encryption to apply to new objects in the bucket.
|
||||
In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly.
|
||||
choices: [ 'none', 'AES256', 'aws:kms' ]
|
||||
version_added: "2.9"
|
||||
type: str
|
||||
encryption_key_id:
|
||||
description: KMS master key ID to use for the default encryption. This parameter is allowed if encryption is aws:kms. If
|
||||
not specified then it will default to the AWS provided KMS key.
|
||||
version_added: "2.9"
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
notes:
|
||||
  - If C(requestPayment), C(policy), C(tagging) or C(versioning)
    operations/API aren't implemented by the endpoint, the module doesn't fail
    if each related parameter satisfies the following condition:
    I(requester_pays) is C(False), I(policy), I(tags), and I(versioning) are C(None).
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Create a simple s3 bucket
|
||||
- s3_bucket:
|
||||
name: mys3bucket
|
||||
state: present
|
||||
|
||||
# Create a simple s3 bucket on Ceph Rados Gateway
|
||||
- s3_bucket:
|
||||
name: mys3bucket
|
||||
s3_url: http://your-ceph-rados-gateway-server.xxx
|
||||
ceph: true
|
||||
|
||||
# Remove an s3 bucket and any keys it contains
|
||||
- s3_bucket:
|
||||
name: mys3bucket
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
|
||||
- s3_bucket:
|
||||
name: mys3bucket
|
||||
policy: "{{ lookup('file','policy.json') }}"
|
||||
requester_pays: yes
|
||||
versioning: yes
|
||||
tags:
|
||||
example: tag1
|
||||
another: tag2
|
||||
|
||||
# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint
|
||||
- s3_bucket:
|
||||
name: mydobucket
|
||||
s3_url: 'https://nyc3.digitaloceanspaces.com'
|
||||
|
||||
# Create a bucket with AES256 encryption
|
||||
- s3_bucket:
|
||||
name: mys3bucket
|
||||
state: present
|
||||
encryption: "AES256"
|
||||
|
||||
# Create a bucket with aws:kms encryption, KMS key
|
||||
- s3_bucket:
|
||||
name: mys3bucket
|
||||
state: present
|
||||
encryption: "aws:kms"
|
||||
encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example"
|
||||
|
||||
# Create a bucket with aws:kms encryption, default key
|
||||
- s3_bucket:
|
||||
name: mys3bucket
|
||||
state: present
|
||||
encryption: "aws:kms"
|
||||
'''
|
||||
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.module_utils.basic import to_text
|
||||
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
|
||||
from ansible.module_utils.ec2 import compare_policies, ec2_argument_spec, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
|
||||
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, AWSRetry
|
||||
|
||||
try:
|
||||
from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError
|
||||
except ImportError:
|
||||
pass # handled by AnsibleAWSModule
|
||||
|
||||
|
||||
def create_or_update_bucket(s3_client, module, location):
|
||||
|
||||
policy = module.params.get("policy")
|
||||
name = module.params.get("name")
|
||||
requester_pays = module.params.get("requester_pays")
|
||||
tags = module.params.get("tags")
|
||||
purge_tags = module.params.get("purge_tags")
|
||||
versioning = module.params.get("versioning")
|
||||
encryption = module.params.get("encryption")
|
||||
encryption_key_id = module.params.get("encryption_key_id")
|
||||
changed = False
|
||||
result = {}
|
||||
|
||||
try:
|
||||
bucket_is_present = bucket_exists(s3_client, name)
|
||||
except EndpointConnectionError as e:
|
||||
module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to check bucket presence")
|
||||
|
||||
if not bucket_is_present:
|
||||
try:
|
||||
bucket_changed = create_bucket(s3_client, name, location)
|
||||
s3_client.get_waiter('bucket_exists').wait(Bucket=name)
|
||||
changed = changed or bucket_changed
|
||||
except WaiterError as e:
|
||||
module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed while creating bucket")
|
||||
|
||||
# Versioning
|
||||
try:
|
||||
versioning_status = get_bucket_versioning(s3_client, name)
|
||||
except BotoCoreError as exp:
|
||||
module.fail_json_aws(exp, msg="Failed to get bucket versioning")
|
||||
except ClientError as exp:
|
||||
if exp.response['Error']['Code'] != 'NotImplemented' or versioning is not None:
|
||||
module.fail_json_aws(exp, msg="Failed to get bucket versioning")
|
||||
else:
|
||||
if versioning is not None:
|
||||
required_versioning = None
|
||||
if versioning and versioning_status.get('Status') != "Enabled":
|
||||
required_versioning = 'Enabled'
|
||||
elif not versioning and versioning_status.get('Status') == "Enabled":
|
||||
required_versioning = 'Suspended'
|
||||
|
||||
if required_versioning:
|
||||
try:
|
||||
put_bucket_versioning(s3_client, name, required_versioning)
|
||||
changed = True
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to update bucket versioning")
|
||||
|
||||
versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
|
||||
|
||||
# This output format is there to ensure compatibility with previous versions of the module
|
||||
result['versioning'] = {
|
||||
'Versioning': versioning_status.get('Status', 'Disabled'),
|
||||
'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
|
||||
}
|
||||
|
||||
# Requester pays
|
||||
try:
|
||||
requester_pays_status = get_bucket_request_payment(s3_client, name)
|
||||
except BotoCoreError as exp:
|
||||
module.fail_json_aws(exp, msg="Failed to get bucket request payment")
|
||||
except ClientError as exp:
|
||||
if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or requester_pays:
|
||||
module.fail_json_aws(exp, msg="Failed to get bucket request payment")
|
||||
else:
|
||||
if requester_pays:
|
||||
payer = 'Requester' if requester_pays else 'BucketOwner'
|
||||
if requester_pays_status != payer:
|
||||
put_bucket_request_payment(s3_client, name, payer)
|
||||
requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
|
||||
if requester_pays_status is None:
|
||||
                    # We have often seen that the put request was not taken into
|
||||
# account, so we retry one more time
|
||||
put_bucket_request_payment(s3_client, name, payer)
|
||||
requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
|
||||
changed = True
|
||||
|
||||
result['requester_pays'] = requester_pays
|
||||
|
||||
# Policy
|
||||
try:
|
||||
current_policy = get_bucket_policy(s3_client, name)
|
||||
except BotoCoreError as exp:
|
||||
module.fail_json_aws(exp, msg="Failed to get bucket policy")
|
||||
except ClientError as exp:
|
||||
if exp.response['Error']['Code'] != 'NotImplemented' or policy is not None:
|
||||
module.fail_json_aws(exp, msg="Failed to get bucket policy")
|
||||
else:
|
||||
if policy is not None:
|
||||
if isinstance(policy, string_types):
|
||||
policy = json.loads(policy)
|
||||
|
||||
if not policy and current_policy:
|
||||
try:
|
||||
delete_bucket_policy(s3_client, name)
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to delete bucket policy")
|
||||
current_policy = wait_policy_is_applied(module, s3_client, name, policy)
|
||||
changed = True
|
||||
elif compare_policies(current_policy, policy):
|
||||
try:
|
||||
put_bucket_policy(s3_client, name, policy)
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to update bucket policy")
|
||||
current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
|
||||
if current_policy is None:
|
||||
                    # As for request payment, it often happens that the put request was not taken into
|
||||
# account, so we retry one more time
|
||||
put_bucket_policy(s3_client, name, policy)
|
||||
current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
|
||||
changed = True
|
||||
|
||||
result['policy'] = current_policy
|
||||
|
||||
# Tags
|
||||
try:
|
||||
current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
|
||||
except BotoCoreError as exp:
|
||||
module.fail_json_aws(exp, msg="Failed to get bucket tags")
|
||||
except ClientError as exp:
|
||||
if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or tags is not None:
|
||||
module.fail_json_aws(exp, msg="Failed to get bucket tags")
|
||||
else:
|
||||
if tags is not None:
|
||||
# Tags are always returned as text
|
||||
tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
|
||||
if not purge_tags:
|
||||
# Ensure existing tags that aren't updated by desired tags remain
|
||||
current_copy = current_tags_dict.copy()
|
||||
current_copy.update(tags)
|
||||
tags = current_copy
|
||||
if current_tags_dict != tags:
|
||||
if tags:
|
||||
try:
|
||||
put_bucket_tagging(s3_client, name, tags)
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to update bucket tags")
|
||||
else:
|
||||
if purge_tags:
|
||||
try:
|
||||
delete_bucket_tagging(s3_client, name)
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to delete bucket tags")
|
||||
current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
|
||||
changed = True
|
||||
|
||||
result['tags'] = current_tags_dict
|
||||
|
||||
# Encryption
|
||||
if hasattr(s3_client, "get_bucket_encryption"):
|
||||
try:
|
||||
current_encryption = get_bucket_encryption(s3_client, name)
|
||||
except (ClientError, BotoCoreError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to get bucket encryption")
|
||||
elif encryption is not None:
|
||||
module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41")
|
||||
|
||||
if encryption is not None:
|
||||
current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
|
||||
current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
|
||||
if encryption == 'none' and current_encryption_algorithm is not None:
|
||||
try:
|
||||
delete_bucket_encryption(s3_client, name)
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to delete bucket encryption")
|
||||
current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
|
||||
changed = True
|
||||
        elif encryption != 'none' and ((encryption != current_encryption_algorithm) or
                                       (encryption == 'aws:kms' and current_encryption_key != encryption_key_id)):
|
||||
expected_encryption = {'SSEAlgorithm': encryption}
|
||||
if encryption == 'aws:kms' and encryption_key_id is not None:
|
||||
expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
|
||||
try:
|
||||
put_bucket_encryption(s3_client, name, expected_encryption)
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to set bucket encryption")
|
||||
current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption)
|
||||
changed = True
|
||||
|
||||
result['encryption'] = current_encryption
|
||||
|
||||
module.exit_json(changed=changed, name=name, **result)
|
||||
|
||||
|
||||
def bucket_exists(s3_client, bucket_name):
|
||||
# head_bucket appeared to be really inconsistent, so we use list_buckets instead,
|
||||
# and loop over all the buckets, even if we know it's less performant :(
|
||||
all_buckets = s3_client.list_buckets(Bucket=bucket_name)['Buckets']
|
||||
return any(bucket['Name'] == bucket_name for bucket in all_buckets)
|
||||
|
||||
|
||||
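# Illustrative sketch, not part of the original module: the same existence
# check expressed with head_bucket, which the comment above avoids because of
# its inconsistent behaviour; shown for comparison only, and treating any
# non-404 error as "bucket exists" is an assumption.
#
#     try:
#         s3_client.head_bucket(Bucket=bucket_name)
#         exists = True
#     except ClientError as e:
#         exists = e.response['Error']['Code'] != '404'

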
@AWSRetry.exponential_backoff(max_delay=120)
|
||||
def create_bucket(s3_client, bucket_name, location):
|
||||
try:
|
||||
configuration = {}
|
||||
if location not in ('us-east-1', None):
|
||||
configuration['LocationConstraint'] = location
|
||||
if len(configuration) > 0:
|
||||
s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration)
|
||||
else:
|
||||
s3_client.create_bucket(Bucket=bucket_name)
|
||||
return True
|
||||
except ClientError as e:
|
||||
error_code = e.response['Error']['Code']
|
||||
if error_code == 'BucketAlreadyOwnedByYou':
|
||||
            # We should never get here since we check the bucket presence before calling the create_or_update_bucket
            # method. However, the AWS API sometimes fails to report bucket presence, so we catch this exception
|
||||
return False
|
||||
else:
|
||||
raise e
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
|
||||
def put_bucket_tagging(s3_client, bucket_name, tags):
|
||||
s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
|
||||
def put_bucket_policy(s3_client, bucket_name, policy):
|
||||
s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
|
||||
def delete_bucket_policy(s3_client, bucket_name):
|
||||
s3_client.delete_bucket_policy(Bucket=bucket_name)
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
|
||||
def get_bucket_policy(s3_client, bucket_name):
|
||||
try:
|
||||
current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy'))
|
||||
except ClientError as e:
|
||||
if e.response['Error']['Code'] == 'NoSuchBucketPolicy':
|
||||
current_policy = None
|
||||
else:
|
||||
raise e
|
||||
return current_policy
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
|
||||
def put_bucket_request_payment(s3_client, bucket_name, payer):
|
||||
s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
|
||||
def get_bucket_request_payment(s3_client, bucket_name):
|
||||
return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
|
||||
def get_bucket_versioning(s3_client, bucket_name):
|
||||
return s3_client.get_bucket_versioning(Bucket=bucket_name)
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
|
||||
def put_bucket_versioning(s3_client, bucket_name, required_versioning):
|
||||
s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
|
||||
def get_bucket_encryption(s3_client, bucket_name):
|
||||
try:
|
||||
result = s3_client.get_bucket_encryption(Bucket=bucket_name)
|
||||
return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault')
|
||||
except ClientError as e:
|
||||
if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
|
||||
return None
|
||||
else:
|
||||
raise e
|
||||
except (IndexError, KeyError):
|
||||
return None
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
|
||||
def put_bucket_encryption(s3_client, bucket_name, encryption):
|
||||
server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
|
||||
s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration)
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
|
||||
def delete_bucket_tagging(s3_client, bucket_name):
|
||||
s3_client.delete_bucket_tagging(Bucket=bucket_name)
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
|
||||
def delete_bucket_encryption(s3_client, bucket_name):
|
||||
s3_client.delete_bucket_encryption(Bucket=bucket_name)
|
||||
|
||||
|
||||
@AWSRetry.exponential_backoff(max_delay=120)
|
||||
def delete_bucket(s3_client, bucket_name):
|
||||
try:
|
||||
s3_client.delete_bucket(Bucket=bucket_name)
|
||||
except ClientError as e:
|
||||
if e.response['Error']['Code'] == 'NoSuchBucket':
|
||||
            # This means the bucket was probably already in a deleting state when we checked its existence
|
||||
# We just ignore the error
|
||||
pass
|
||||
else:
|
||||
raise e
|
||||
|
||||
|
||||
def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
|
||||
for dummy in range(0, 12):
|
||||
try:
|
||||
current_policy = get_bucket_policy(s3_client, bucket_name)
|
||||
except (ClientError, BotoCoreError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to get bucket policy")
|
||||
|
||||
if compare_policies(current_policy, expected_policy):
|
||||
time.sleep(5)
|
||||
else:
|
||||
return current_policy
|
||||
if should_fail:
|
||||
module.fail_json(msg="Bucket policy failed to apply in the expected time")
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
|
||||
for dummy in range(0, 12):
|
||||
try:
|
||||
requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to get bucket request payment")
|
||||
if requester_pays_status != expected_payer:
|
||||
time.sleep(5)
|
||||
else:
|
||||
return requester_pays_status
|
||||
if should_fail:
|
||||
module.fail_json(msg="Bucket request payment failed to apply in the expected time")
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption):
|
||||
for dummy in range(0, 12):
|
||||
try:
|
||||
encryption = get_bucket_encryption(s3_client, bucket_name)
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
|
||||
if encryption != expected_encryption:
|
||||
time.sleep(5)
|
||||
else:
|
||||
return encryption
|
||||
module.fail_json(msg="Bucket encryption failed to apply in the expected time")
|
||||
|
||||
|
||||
def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
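# Versioning changes can take longer to propagate; poll for roughly three minutes (24 tries, 8 second sleep).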
|
||||
for dummy in range(0, 24):
|
||||
try:
|
||||
versioning_status = get_bucket_versioning(s3_client, bucket_name)
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
|
||||
if versioning_status.get('Status') != required_versioning:
|
||||
time.sleep(8)
|
||||
else:
|
||||
return versioning_status
|
||||
module.fail_json(msg="Bucket versioning failed to apply in the expected time")
|
||||
|
||||
|
||||
def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
|
||||
for dummy in range(0, 12):
|
||||
try:
|
||||
current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
|
||||
except (ClientError, BotoCoreError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to get bucket policy")
|
||||
if current_tags_dict != expected_tags_dict:
|
||||
time.sleep(5)
|
||||
else:
|
||||
return current_tags_dict
|
||||
module.fail_json(msg="Bucket tags failed to apply in the expected time")
|
||||
|
||||
|
||||
def get_current_bucket_tags_dict(s3_client, bucket_name):
|
||||
try:
|
||||
current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
|
||||
except ClientError as e:
|
||||
if e.response['Error']['Code'] == 'NoSuchTagSet':
|
||||
return {}
|
||||
raise e
|
||||
|
||||
return boto3_tag_list_to_ansible_dict(current_tags)
|
||||
|
||||
|
||||
def paginated_list(s3_client, **pagination_params):
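# Yield the bucket's object keys one page at a time via the list_objects_v2 paginator.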
|
||||
pg = s3_client.get_paginator('list_objects_v2')
|
||||
for page in pg.paginate(**pagination_params):
|
||||
yield [data['Key'] for data in page.get('Contents', [])]
|
||||
|
||||
|
||||
def paginated_versions_list(s3_client, **pagination_params):
|
||||
try:
|
||||
pg = s3_client.get_paginator('list_object_versions')
|
||||
for page in pg.paginate(**pagination_params):
|
||||
# We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion
|
||||
yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))]
|
||||
except is_boto3_error_code('NoSuchBucket'):
|
||||
yield []
|
||||
|
||||
|
||||
def destroy_bucket(s3_client, module):
|
||||
|
||||
force = module.params.get("force")
|
||||
name = module.params.get("name")
|
||||
try:
|
||||
bucket_is_present = bucket_exists(s3_client, name)
|
||||
except EndpointConnectionError as e:
|
||||
module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to check bucket presence")
|
||||
|
||||
if not bucket_is_present:
|
||||
module.exit_json(changed=False)
|
||||
|
||||
if force:
|
||||
# if there are contents then we need to delete them (including versions) before we can delete the bucket
|
||||
try:
|
||||
for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
|
||||
formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs]
|
||||
for fk in formatted_keys:
|
||||
# remove VersionId from cases where they are `None` so that
|
||||
# unversioned objects are deleted using `DeleteObject`
|
||||
# rather than `DeleteObjectVersion`, improving backwards
|
||||
# compatibility with older IAM policies.
|
||||
if not fk.get('VersionId'):
|
||||
fk.pop('VersionId')
|
||||
|
||||
if formatted_keys:
|
||||
resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
|
||||
if resp.get('Errors'):
|
||||
module.fail_json(
|
||||
msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format(
|
||||
', '.join([k['Key'] for k in resp['Errors']])
|
||||
),
|
||||
errors=resp['Errors'], response=resp
|
||||
)
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed while deleting bucket")
|
||||
|
||||
try:
|
||||
delete_bucket(s3_client, name)
|
||||
s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60))
|
||||
except WaiterError as e:
|
||||
module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to delete bucket")
|
||||
|
||||
module.exit_json(changed=True)
|
||||
|
||||
|
||||
def is_fakes3(s3_url):
|
||||
""" Return True if s3_url has scheme fakes3:// """
|
||||
if s3_url is not None:
|
||||
return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
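# Build the boto3 S3 client: honour a Ceph RGW endpoint, a fakes3/fakes3s test endpoint, or fall back to the plain region/endpoint parameters.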
|
||||
if s3_url and ceph: # TODO - test this
|
||||
ceph = urlparse(s3_url)
|
||||
params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
|
||||
elif is_fakes3(s3_url):
|
||||
fakes3 = urlparse(s3_url)
|
||||
port = fakes3.port
|
||||
if fakes3.scheme == 'fakes3s':
|
||||
protocol = "https"
|
||||
if port is None:
|
||||
port = 443
|
||||
else:
|
||||
protocol = "http"
|
||||
if port is None:
|
||||
port = 80
|
||||
params = dict(module=module, conn_type='client', resource='s3', region=location,
|
||||
endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
|
||||
use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
|
||||
else:
|
||||
params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
|
||||
return boto3_conn(**params)
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
force=dict(default=False, type='bool'),
|
||||
policy=dict(type='json'),
|
||||
name=dict(required=True),
|
||||
requester_pays=dict(default=False, type='bool'),
|
||||
s3_url=dict(aliases=['S3_URL']),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
tags=dict(type='dict'),
|
||||
purge_tags=dict(type='bool', default=True),
|
||||
versioning=dict(type='bool'),
|
||||
ceph=dict(default=False, type='bool'),
|
||||
encryption=dict(choices=['none', 'AES256', 'aws:kms']),
|
||||
encryption_key_id=dict()
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleAWSModule(
|
||||
argument_spec=argument_spec,
|
||||
)
|
||||
|
||||
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
|
||||
|
||||
if region in ('us-east-1', '', None):
|
||||
# default to US Standard region
|
||||
location = 'us-east-1'
|
||||
else:
|
||||
# Boto uses symbolic names for locations but region strings will
|
||||
# actually work fine for everything except us-east-1 (US Standard)
|
||||
location = region
|
||||
|
||||
s3_url = module.params.get('s3_url')
|
||||
ceph = module.params.get('ceph')
|
||||
|
||||
# allow eucarc environment variables to be used if ansible vars aren't set
|
||||
if not s3_url and 'S3_URL' in os.environ:
|
||||
s3_url = os.environ['S3_URL']
|
||||
|
||||
if ceph and not s3_url:
|
||||
module.fail_json(msg='ceph flavour requires s3_url')
|
||||
|
||||
# Look at s3_url and tweak connection settings
|
||||
# if connecting to Ceph RGW, Walrus or fakes3
|
||||
if s3_url:
|
||||
for key in ['validate_certs', 'security_token', 'profile_name']:
|
||||
aws_connect_kwargs.pop(key, None)
|
||||
s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)
|
||||
|
||||
if s3_client is None: # this should never happen
|
||||
module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')
|
||||
|
||||
state = module.params.get("state")
|
||||
encryption = module.params.get("encryption")
|
||||
encryption_key_id = module.params.get("encryption_key_id")
|
||||
|
||||
# Parameter validation
|
||||
if encryption_key_id is not None and encryption is None:
|
||||
module.fail_json(msg="You must specify encryption parameter along with encryption_key_id.")
|
||||
elif encryption_key_id is not None and encryption != 'aws:kms':
|
||||
module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")
|
||||
|
||||
if state == 'present':
|
||||
create_or_update_bucket(s3_client, module, location)
|
||||
elif state == 'absent':
|
||||
destroy_bucket(s3_client, module)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,260 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# (c) 2017, Petr Lautrbach <plautrba@redhat.com>
|
||||
# Based on seport.py module (c) 2014, Dan Keder <dan.keder@gmail.com>
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: selogin
|
||||
short_description: Manages Linux user to SELinux user mapping
description:
- Manages Linux user to SELinux user mapping
|
||||
version_added: "2.8"
|
||||
options:
|
||||
login:
|
||||
description:
|
||||
- a Linux user
|
||||
required: true
|
||||
seuser:
|
||||
description:
|
||||
- SELinux user name
|
||||
required: true
|
||||
selevel:
|
||||
aliases: [ serange ]
|
||||
description:
|
||||
- MLS/MCS Security Range (MLS/MCS Systems only). SELinux Range for the SELinux login mapping; defaults to the SELinux user record range.
|
||||
default: s0
|
||||
state:
|
||||
description:
|
||||
- Whether the login mapping should be present or absent.
|
||||
required: true
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
reload:
|
||||
description:
|
||||
- Reload SELinux policy after commit.
|
||||
default: yes
|
||||
ignore_selinux_state:
|
||||
description:
|
||||
- Run independently of the SELinux runtime state
|
||||
type: bool
|
||||
default: false
|
||||
notes:
|
||||
- The changes are persistent across reboots
- Not tested on any Debian-based system
|
||||
requirements: [ 'libselinux', 'policycoreutils' ]
|
||||
author:
|
||||
- Dan Keder (@dankeder)
|
||||
- Petr Lautrbach (@bachradsusi)
|
||||
- James Cassell (@jamescassell)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Modify the default user on the system to the guest_u user
|
||||
- selogin:
|
||||
login: __default__
|
||||
seuser: guest_u
|
||||
state: present
|
||||
|
||||
# Assign gijoe user on an MLS machine a range and to the staff_u user
|
||||
- selogin:
|
||||
login: gijoe
|
||||
seuser: staff_u
|
||||
serange: SystemLow-Secret
|
||||
state: present
|
||||
|
||||
# Assign all users in the engineering group to the staff_u user
|
||||
- selogin:
|
||||
login: '%engineering'
|
||||
seuser: staff_u
|
||||
state: present
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
# Default return values
|
||||
'''
|
||||
|
||||
|
||||
import traceback
|
||||
|
||||
SELINUX_IMP_ERR = None
|
||||
try:
|
||||
import selinux
|
||||
HAVE_SELINUX = True
|
||||
except ImportError:
|
||||
SELINUX_IMP_ERR = traceback.format_exc()
|
||||
HAVE_SELINUX = False
|
||||
|
||||
SEOBJECT_IMP_ERR = None
|
||||
try:
|
||||
import seobject
|
||||
HAVE_SEOBJECT = True
|
||||
except ImportError:
|
||||
SEOBJECT_IMP_ERR = traceback.format_exc()
|
||||
HAVE_SEOBJECT = False
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''):
|
||||
""" Add linux user to SELinux user mapping
|
||||
|
||||
:type module: AnsibleModule
|
||||
:param module: Ansible module
|
||||
|
||||
:type login: str
|
||||
:param login: a Linux User or a Linux group if it begins with %
|
||||
|
||||
:type seuser: str
|
||||
:param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
|
||||
|
||||
:type serange: str
|
||||
:param serange: SELinux MLS/MCS range (defaults to 's0')
|
||||
|
||||
:type do_reload: bool
|
||||
:param do_reload: Whether to reload SELinux policy after commit
|
||||
|
||||
:type sestore: str
|
||||
:param sestore: SELinux store
|
||||
|
||||
:rtype: bool
|
||||
:return: True if the policy was changed, otherwise False
|
||||
"""
|
||||
try:
|
||||
selogin = seobject.loginRecords(sestore)
|
||||
selogin.set_reload(do_reload)
|
||||
change = False
|
||||
all_logins = selogin.get_all()
|
||||
# module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore))
|
||||
# for local_login in all_logins:
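# Add a new mapping when the login is not present yet; otherwise modify it if the seuser or range differs.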
|
||||
if login not in all_logins.keys():
|
||||
change = True
|
||||
if not module.check_mode:
|
||||
selogin.add(login, seuser, serange)
|
||||
else:
|
||||
if all_logins[login][0] != seuser or all_logins[login][1] != serange:
|
||||
change = True
|
||||
if not module.check_mode:
|
||||
selogin.modify(login, seuser, serange)
|
||||
|
||||
except (ValueError, KeyError, OSError, RuntimeError) as e:
|
||||
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
|
||||
|
||||
return change
|
||||
|
||||
|
||||
def semanage_login_del(module, login, seuser, do_reload, sestore=''):
|
||||
""" Delete linux user to SELinux user mapping
|
||||
|
||||
:type module: AnsibleModule
|
||||
:param module: Ansible module
|
||||
|
||||
:type login: str
|
||||
:param login: a Linux User or a Linux group if it begins with %
|
||||
|
||||
:type seuser: str
|
||||
:param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
|
||||
|
||||
:type do_reload: bool
|
||||
:param do_reload: Whether to reload SELinux policy after commit
|
||||
|
||||
:type sestore: str
|
||||
:param sestore: SELinux store
|
||||
|
||||
:rtype: bool
|
||||
:return: True if the policy was changed, otherwise False
|
||||
"""
|
||||
try:
|
||||
selogin = seobject.loginRecords(sestore)
|
||||
selogin.set_reload(do_reload)
|
||||
change = False
|
||||
all_logins = selogin.get_all()
|
||||
# module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore))
|
||||
if login in all_logins.keys():
|
||||
change = True
|
||||
if not module.check_mode:
|
||||
selogin.delete(login)
|
||||
|
||||
except (ValueError, KeyError, OSError, RuntimeError) as e:
|
||||
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
|
||||
|
||||
return change
|
||||
|
||||
|
||||
def get_runtime_status(ignore_selinux_state=False):
|
||||
return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
ignore_selinux_state=dict(type='bool', default=False),
|
||||
login=dict(type='str', required=True),
|
||||
seuser=dict(type='str'),
|
||||
selevel=dict(type='str', aliases=['serange'], default='s0'),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
reload=dict(type='bool', default=True),
|
||||
),
|
||||
required_if=[
|
||||
["state", "present", ["seuser"]]
|
||||
],
|
||||
supports_check_mode=True
|
||||
)
|
||||
if not HAVE_SELINUX:
|
||||
module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR)
|
||||
|
||||
if not HAVE_SEOBJECT:
|
||||
module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR)
|
||||
|
||||
ignore_selinux_state = module.params['ignore_selinux_state']
|
||||
|
||||
if not get_runtime_status(ignore_selinux_state):
|
||||
module.fail_json(msg="SELinux is disabled on this host.")
|
||||
|
||||
login = module.params['login']
|
||||
seuser = module.params['seuser']
|
||||
serange = module.params['selevel']
|
||||
state = module.params['state']
|
||||
do_reload = module.params['reload']
|
||||
|
||||
result = {
|
||||
'login': login,
|
||||
'seuser': seuser,
|
||||
'serange': serange,
|
||||
'state': state,
|
||||
}
|
||||
|
||||
if state == 'present':
|
||||
result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange)
|
||||
elif state == 'absent':
|
||||
result['changed'] = semanage_login_del(module, login, seuser, do_reload)
|
||||
else:
|
||||
module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,783 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: x509_crl
|
||||
version_added: "2.10"
|
||||
short_description: Generate Certificate Revocation Lists (CRLs)
|
||||
description:
|
||||
- This module allows one to (re)generate or update Certificate Revocation Lists (CRLs).
|
||||
- Certificates on the revocation list can be either specified via serial number and (optionally) their issuer,
|
||||
or as a path to a certificate file in PEM format.
|
||||
requirements:
|
||||
- cryptography >= 1.2
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Whether the CRL file should exist or not, taking action if the state is different from what is stated.
|
||||
type: str
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
|
||||
mode:
|
||||
description:
|
||||
- Defines how to process entries of existing CRLs.
|
||||
- If set to C(generate), makes sure that the CRL has the exact set of revoked certificates
|
||||
as specified in I(revoked_certificates).
|
||||
- If set to C(update), makes sure that the CRL contains the revoked certificates from
|
||||
I(revoked_certificates), but can also contain other revoked certificates. If the CRL file
|
||||
already exists, all entries from the existing CRL will also be included in the new CRL.
|
||||
When using C(update), you might be interested in setting I(ignore_timestamps) to C(yes).
|
||||
type: str
|
||||
default: generate
|
||||
choices: [ generate, update ]
|
||||
|
||||
force:
|
||||
description:
|
||||
- Whether the CRL should be forcibly regenerated.
|
||||
type: bool
|
||||
default: no
|
||||
|
||||
backup:
|
||||
description:
|
||||
- Create a backup file including a timestamp so you can get the original
|
||||
CRL back if you overwrote it with a new one by accident.
|
||||
type: bool
|
||||
default: no
|
||||
|
||||
path:
|
||||
description:
|
||||
- Remote absolute path where the generated CRL file should be created or is already located.
|
||||
type: path
|
||||
required: yes
|
||||
|
||||
privatekey_path:
|
||||
description:
|
||||
- Path to the CA's private key to use when signing the CRL.
|
||||
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
|
||||
type: path
|
||||
|
||||
privatekey_content:
|
||||
description:
|
||||
- The content of the CA's private key to use when signing the CRL.
|
||||
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
|
||||
type: str
|
||||
|
||||
privatekey_passphrase:
|
||||
description:
|
||||
- The passphrase for the I(privatekey_path).
|
||||
- This is required if the private key is password protected.
|
||||
type: str
|
||||
|
||||
issuer:
|
||||
description:
|
||||
- Key/value pairs that will be present in the issuer name field of the CRL.
|
||||
- If you need to specify more than one value with the same key, use a list as value.
|
||||
- Required if I(state) is C(present).
|
||||
type: dict
|
||||
|
||||
last_update:
|
||||
description:
|
||||
- The point in time from which this CRL can be trusted.
|
||||
- Time can be specified either as relative time or as absolute timestamp.
|
||||
- Time will always be interpreted as UTC.
|
||||
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
|
||||
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
|
||||
- Note that if using relative time this module is NOT idempotent, except when
|
||||
I(ignore_timestamps) is set to C(yes).
|
||||
type: str
|
||||
default: "+0s"
|
||||
|
||||
next_update:
|
||||
description:
|
||||
- "The absolute latest point in time by which this I(issuer) is expected to have issued
|
||||
another CRL. Many clients will treat a CRL as expired once I(next_update) occurs."
|
||||
- Time can be specified either as relative time or as absolute timestamp.
|
||||
- Time will always be interpreted as UTC.
|
||||
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
|
||||
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
|
||||
- Note that if using relative time this module is NOT idempotent, except when
|
||||
I(ignore_timestamps) is set to C(yes).
|
||||
- Required if I(state) is C(present).
|
||||
type: str
|
||||
|
||||
digest:
|
||||
description:
|
||||
- Digest algorithm to be used when signing the CRL.
|
||||
type: str
|
||||
default: sha256
|
||||
|
||||
revoked_certificates:
|
||||
description:
|
||||
- List of certificates to be revoked.
|
||||
- Required if I(state) is C(present).
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
path:
|
||||
description:
|
||||
- Path to a certificate in PEM format.
|
||||
- The serial number and issuer will be extracted from the certificate.
|
||||
- Mutually exclusive with I(content) and I(serial_number). One of these three options
|
||||
must be specified.
|
||||
type: path
|
||||
content:
|
||||
description:
|
||||
- Content of a certificate in PEM format.
|
||||
- The serial number and issuer will be extracted from the certificate.
|
||||
- Mutually exclusive with I(path) and I(serial_number). One of these three options
|
||||
must be specified.
|
||||
type: str
|
||||
serial_number:
|
||||
description:
|
||||
- Serial number of the certificate.
|
||||
- Mutually exclusive with I(path) and I(content). One of these three options must
|
||||
be specified.
|
||||
type: int
|
||||
revocation_date:
|
||||
description:
|
||||
- The point in time the certificate was revoked.
|
||||
- Time can be specified either as relative time or as absolute timestamp.
|
||||
- Time will always be interpreted as UTC.
|
||||
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
|
||||
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
|
||||
- Note that if using relative time this module is NOT idempotent, except when
|
||||
I(ignore_timestamps) is set to C(yes).
|
||||
type: str
|
||||
default: "+0s"
|
||||
issuer:
|
||||
description:
|
||||
- The certificate's issuer.
|
||||
- "Example: C(DNS:ca.example.org)"
|
||||
type: list
|
||||
elements: str
|
||||
issuer_critical:
|
||||
description:
|
||||
- Whether the certificate issuer extension should be critical.
|
||||
type: bool
|
||||
default: no
|
||||
reason:
|
||||
description:
|
||||
- The value for the revocation reason extension.
|
||||
type: str
|
||||
choices:
|
||||
- unspecified
|
||||
- key_compromise
|
||||
- ca_compromise
|
||||
- affiliation_changed
|
||||
- superseded
|
||||
- cessation_of_operation
|
||||
- certificate_hold
|
||||
- privilege_withdrawn
|
||||
- aa_compromise
|
||||
- remove_from_crl
|
||||
reason_critical:
|
||||
description:
|
||||
- Whether the revocation reason extension should be critical.
|
||||
type: bool
|
||||
default: no
|
||||
invalidity_date:
|
||||
description:
|
||||
- The point in time it was known/suspected that the private key was compromised
|
||||
or that the certificate otherwise became invalid.
|
||||
- Time can be specified either as relative time or as absolute timestamp.
|
||||
- Time will always be interpreted as UTC.
|
||||
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
|
||||
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
|
||||
- Note that if using relative time this module is NOT idempotent. This will NOT
|
||||
change when I(ignore_timestamps) is set to C(yes).
|
||||
type: str
|
||||
invalidity_date_critical:
|
||||
description:
|
||||
- Whether the invalidity date extension should be critical.
|
||||
type: bool
|
||||
default: no
|
||||
|
||||
ignore_timestamps:
|
||||
description:
|
||||
- Whether the timestamps I(last_update), I(next_update) and I(revocation_date) (in
|
||||
I(revoked_certificates)) should be ignored for idempotency checks. The timestamp
|
||||
I(invalidity_date) in I(revoked_certificates) will never be ignored.
|
||||
- Use this in combination with relative timestamps for these values to get idempotency.
|
||||
type: bool
|
||||
default: no
|
||||
|
||||
return_content:
|
||||
description:
|
||||
- If set to C(yes), will return the (current or generated) CRL's content as I(crl).
|
||||
type: bool
|
||||
default: no
|
||||
|
||||
extends_documentation_fragment:
|
||||
- files
|
||||
|
||||
notes:
|
||||
- All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
|
||||
- Date specified should be UTC. Minutes and seconds are mandatory.
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Generate a CRL
|
||||
x509_crl:
|
||||
path: /etc/ssl/my-ca.crl
|
||||
privatekey_path: /etc/ssl/private/my-ca.pem
|
||||
issuer:
|
||||
CN: My CA
|
||||
last_update: "+0s"
|
||||
next_update: "+7d"
|
||||
revoked_certificates:
|
||||
- serial_number: 1234
|
||||
revocation_date: 20190331202428Z
|
||||
issuer:
|
||||
CN: My CA
|
||||
- serial_number: 2345
|
||||
revocation_date: 20191013152910Z
|
||||
reason: affiliation_changed
|
||||
invalidity_date: 20191001000000Z
|
||||
- path: /etc/ssl/crt/revoked-cert.pem
|
||||
revocation_date: 20191010010203Z
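
# A minimal additional sketch (not part of the original examples): extend an existing CRL
# with one more revoked serial number while keeping relative timestamps idempotent.
- name: Add a revoked serial to an existing CRL
  x509_crl:
    path: /etc/ssl/my-ca.crl
    privatekey_path: /etc/ssl/private/my-ca.pem
    issuer:
      CN: My CA
    next_update: "+7d"
    mode: update
    ignore_timestamps: yes
    revoked_certificates:
      - serial_number: 3456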
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
filename:
|
||||
description: Path to the generated CRL
|
||||
returned: changed or success
|
||||
type: str
|
||||
sample: /path/to/my-ca.crl
|
||||
backup_file:
|
||||
description: Name of backup file created.
|
||||
returned: changed and if I(backup) is C(yes)
|
||||
type: str
|
||||
sample: /path/to/my-ca.crl.2019-03-09@11:22~
|
||||
privatekey:
|
||||
description: Path to the private CA key
|
||||
returned: changed or success
|
||||
type: str
|
||||
sample: /path/to/my-ca.pem
|
||||
issuer:
|
||||
description:
|
||||
- The CRL's issuer.
|
||||
- Note that for repeated values, only the last one will be returned.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
|
||||
issuer_ordered:
|
||||
description: The CRL's issuer as an ordered list of tuples.
|
||||
returned: success
|
||||
type: list
|
||||
elements: list
|
||||
sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
|
||||
last_update:
|
||||
description: The point in time from which this CRL can be trusted as ASN.1 TIME.
|
||||
returned: success
|
||||
type: str
|
||||
sample: 20190413202428Z
|
||||
next_update:
|
||||
description: The point in time by which a new CRL will have been issued and from which the client has to check for it, as ASN.1 TIME.
|
||||
returned: success
|
||||
type: str
|
||||
sample: 20190413202428Z
|
||||
digest:
|
||||
description: The signature algorithm used to sign the CRL.
|
||||
returned: success
|
||||
type: str
|
||||
sample: sha256WithRSAEncryption
|
||||
revoked_certificates:
|
||||
description: List of certificates to be revoked.
|
||||
returned: success
|
||||
type: list
|
||||
elements: dict
|
||||
contains:
|
||||
serial_number:
|
||||
description: Serial number of the certificate.
|
||||
type: int
|
||||
sample: 1234
|
||||
revocation_date:
|
||||
description: The point in time the certificate was revoked as ASN.1 TIME.
|
||||
type: str
|
||||
sample: 20190413202428Z
|
||||
issuer:
|
||||
description: The certificate's issuer.
|
||||
type: list
|
||||
elements: str
|
||||
sample: '["DNS:ca.example.org"]'
|
||||
issuer_critical:
|
||||
description: Whether the certificate issuer extension is critical.
|
||||
type: bool
|
||||
sample: no
|
||||
reason:
|
||||
description:
|
||||
- The value for the revocation reason extension.
|
||||
- One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
|
||||
C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
|
||||
C(remove_from_crl).
|
||||
type: str
|
||||
sample: key_compromise
|
||||
reason_critical:
|
||||
description: Whether the revocation reason extension is critical.
|
||||
type: bool
|
||||
sample: no
|
||||
invalidity_date:
|
||||
description: |
|
||||
The point in time it was known/suspected that the private key was compromised
|
||||
or that the certificate otherwise became invalid as ASN.1 TIME.
|
||||
type: str
|
||||
sample: 20190413202428Z
|
||||
invalidity_date_critical:
|
||||
description: Whether the invalidity date extension is critical.
|
||||
type: bool
|
||||
sample: no
|
||||
crl:
|
||||
description: The (current or generated) CRL's content.
|
||||
returned: if I(state) is C(present) and I(return_content) is C(yes)
|
||||
type: str
|
||||
'''
|
||||
|
||||
|
||||
import os
|
||||
import traceback
|
||||
from ansible.module_utils.compat.version import LooseVersion
|
||||
|
||||
from ansible.module_utils import crypto as crypto_utils
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
|
||||
|
||||
CRYPTOGRAPHY_IMP_ERR = None
|
||||
try:
|
||||
import cryptography
|
||||
from cryptography import x509
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
from cryptography.hazmat.primitives.serialization import Encoding
|
||||
from cryptography.x509 import (
|
||||
CertificateRevocationListBuilder,
|
||||
RevokedCertificateBuilder,
|
||||
NameAttribute,
|
||||
Name,
|
||||
)
|
||||
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
|
||||
except ImportError:
|
||||
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
|
||||
CRYPTOGRAPHY_FOUND = False
|
||||
else:
|
||||
CRYPTOGRAPHY_FOUND = True
|
||||
|
||||
|
||||
TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ"
|
||||
|
||||
|
||||
class CRLError(crypto_utils.OpenSSLObjectError):
|
||||
pass
|
||||
|
||||
|
||||
class CRL(crypto_utils.OpenSSLObject):
|
||||
|
||||
def __init__(self, module):
|
||||
super(CRL, self).__init__(
|
||||
module.params['path'],
|
||||
module.params['state'],
|
||||
module.params['force'],
|
||||
module.check_mode
|
||||
)
|
||||
|
||||
self.update = module.params['mode'] == 'update'
|
||||
self.ignore_timestamps = module.params['ignore_timestamps']
|
||||
self.return_content = module.params['return_content']
|
||||
self.crl_content = None
|
||||
|
||||
self.privatekey_path = module.params['privatekey_path']
|
||||
self.privatekey_content = module.params['privatekey_content']
|
||||
if self.privatekey_content is not None:
|
||||
self.privatekey_content = self.privatekey_content.encode('utf-8')
|
||||
self.privatekey_passphrase = module.params['privatekey_passphrase']
|
||||
|
||||
self.issuer = crypto_utils.parse_name_field(module.params['issuer'])
|
||||
self.issuer = [(entry[0], entry[1]) for entry in self.issuer if entry[1]]
|
||||
|
||||
self.last_update = crypto_utils.get_relative_time_option(module.params['last_update'], 'last_update')
|
||||
self.next_update = crypto_utils.get_relative_time_option(module.params['next_update'], 'next_update')
|
||||
|
||||
self.digest = crypto_utils.select_message_digest(module.params['digest'])
|
||||
if self.digest is None:
|
||||
raise CRLError('The digest "{0}" is not supported'.format(module.params['digest']))
|
||||
|
||||
self.revoked_certificates = []
|
||||
for i, rc in enumerate(module.params['revoked_certificates']):
|
||||
result = {
|
||||
'serial_number': None,
|
||||
'revocation_date': None,
|
||||
'issuer': None,
|
||||
'issuer_critical': False,
|
||||
'reason': None,
|
||||
'reason_critical': False,
|
||||
'invalidity_date': None,
|
||||
'invalidity_date_critical': False,
|
||||
}
|
||||
path_prefix = 'revoked_certificates[{0}].'.format(i)
|
||||
if rc['path'] is not None or rc['content'] is not None:
|
||||
# Load certificate from file or content
|
||||
try:
|
||||
if rc['content'] is not None:
|
||||
rc['content'] = rc['content'].encode('utf-8')
|
||||
cert = crypto_utils.load_certificate(rc['path'], content=rc['content'], backend='cryptography')
|
||||
try:
|
||||
result['serial_number'] = cert.serial_number
|
||||
except AttributeError:
|
||||
# The property was called "serial" before cryptography 1.4
|
||||
result['serial_number'] = cert.serial
|
||||
except crypto_utils.OpenSSLObjectError as e:
|
||||
if rc['content'] is not None:
|
||||
module.fail_json(
|
||||
msg='Cannot parse certificate from {0}content: {1}'.format(path_prefix, to_native(e))
|
||||
)
|
||||
else:
|
||||
module.fail_json(
|
||||
msg='Cannot read certificate "{1}" from {0}path: {2}'.format(path_prefix, rc['path'], to_native(e))
|
||||
)
|
||||
else:
|
||||
# Specify serial_number (and potentially issuer) directly
|
||||
result['serial_number'] = rc['serial_number']
|
||||
# All other options
|
||||
if rc['issuer']:
|
||||
result['issuer'] = [crypto_utils.cryptography_get_name(issuer) for issuer in rc['issuer']]
|
||||
result['issuer_critical'] = rc['issuer_critical']
|
||||
result['revocation_date'] = crypto_utils.get_relative_time_option(
|
||||
rc['revocation_date'],
|
||||
path_prefix + 'revocation_date'
|
||||
)
|
||||
if rc['reason']:
|
||||
result['reason'] = crypto_utils.REVOCATION_REASON_MAP[rc['reason']]
|
||||
result['reason_critical'] = rc['reason_critical']
|
||||
if rc['invalidity_date']:
|
||||
result['invalidity_date'] = crypto_utils.get_relative_time_option(
|
||||
rc['invalidity_date'],
|
||||
path_prefix + 'invalidity_date'
|
||||
)
|
||||
result['invalidity_date_critical'] = rc['invalidity_date_critical']
|
||||
self.revoked_certificates.append(result)
|
||||
|
||||
self.module = module
|
||||
|
||||
self.backup = module.params['backup']
|
||||
self.backup_file = None
|
||||
|
||||
try:
|
||||
self.privatekey = crypto_utils.load_privatekey(
|
||||
path=self.privatekey_path,
|
||||
content=self.privatekey_content,
|
||||
passphrase=self.privatekey_passphrase,
|
||||
backend='cryptography'
|
||||
)
|
||||
except crypto_utils.OpenSSLBadPassphraseError as exc:
|
||||
raise CRLError(exc)
|
||||
|
||||
self.crl = None
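# Try to load an existing CRL so check() and update mode can compare against it; a missing or unreadable file simply leaves self.crl as None.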
|
||||
try:
|
||||
with open(self.path, 'rb') as f:
|
||||
data = f.read()
|
||||
self.crl = x509.load_pem_x509_crl(data, default_backend())
|
||||
if self.return_content:
|
||||
self.crl_content = data
|
||||
except Exception as dummy:
|
||||
self.crl_content = None
|
||||
|
||||
def remove(self):
|
||||
if self.backup:
|
||||
self.backup_file = self.module.backup_local(self.path)
|
||||
super(CRL, self).remove(self.module)
|
||||
|
||||
def _compress_entry(self, entry):
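# Reduce a revoked-certificate entry to a plain tuple for comparison; revocation_date is dropped when timestamps are ignored.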
|
||||
if self.ignore_timestamps:
|
||||
# Throw out revocation_date
|
||||
return (
|
||||
entry['serial_number'],
|
||||
tuple(entry['issuer']) if entry['issuer'] is not None else None,
|
||||
entry['issuer_critical'],
|
||||
entry['reason'],
|
||||
entry['reason_critical'],
|
||||
entry['invalidity_date'],
|
||||
entry['invalidity_date_critical'],
|
||||
)
|
||||
else:
|
||||
return (
|
||||
entry['serial_number'],
|
||||
entry['revocation_date'],
|
||||
tuple(entry['issuer']) if entry['issuer'] is not None else None,
|
||||
entry['issuer_critical'],
|
||||
entry['reason'],
|
||||
entry['reason_critical'],
|
||||
entry['invalidity_date'],
|
||||
entry['invalidity_date_critical'],
|
||||
)
|
||||
|
||||
def check(self, perms_required=True):
|
||||
"""Ensure the resource is in its desired state."""
|
||||
|
||||
state_and_perms = super(CRL, self).check(self.module, perms_required)
|
||||
|
||||
if not state_and_perms:
|
||||
return False
|
||||
|
||||
if self.crl is None:
|
||||
return False
|
||||
|
||||
if self.last_update != self.crl.last_update and not self.ignore_timestamps:
|
||||
return False
|
||||
if self.next_update != self.crl.next_update and not self.ignore_timestamps:
|
||||
return False
|
||||
if self.digest.name != self.crl.signature_hash_algorithm.name:
|
||||
return False
|
||||
|
||||
want_issuer = [(crypto_utils.cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.issuer]
|
||||
if want_issuer != [(sub.oid, sub.value) for sub in self.crl.issuer]:
|
||||
return False
|
||||
|
||||
old_entries = [self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(cert)) for cert in self.crl]
|
||||
new_entries = [self._compress_entry(cert) for cert in self.revoked_certificates]
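# In update mode every requested entry must already be present in the existing CRL; in generate mode the entry lists must match exactly.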
|
||||
if self.update:
|
||||
# We don't simply use a set so that duplicate entries are treated correctly
|
||||
for entry in new_entries:
|
||||
try:
|
||||
old_entries.remove(entry)
|
||||
except ValueError:
|
||||
return False
|
||||
else:
|
||||
if old_entries != new_entries:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _generate_crl(self):
|
||||
backend = default_backend()
|
||||
crl = CertificateRevocationListBuilder()
|
||||
|
||||
try:
|
||||
crl = crl.issuer_name(Name([
|
||||
NameAttribute(crypto_utils.cryptography_name_to_oid(entry[0]), to_text(entry[1]))
|
||||
for entry in self.issuer
|
||||
]))
|
||||
except ValueError as e:
|
||||
raise CRLError(e)
|
||||
|
||||
crl = crl.last_update(self.last_update)
|
||||
crl = crl.next_update(self.next_update)
|
||||
|
||||
if self.update and self.crl:
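# In update mode, carry over existing revoked entries that are not re-specified in revoked_certificates.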
|
||||
new_entries = set([self._compress_entry(entry) for entry in self.revoked_certificates])
|
||||
for entry in self.crl:
|
||||
decoded_entry = self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(entry))
|
||||
if decoded_entry not in new_entries:
|
||||
crl = crl.add_revoked_certificate(entry)
|
||||
for entry in self.revoked_certificates:
|
||||
revoked_cert = RevokedCertificateBuilder()
|
||||
revoked_cert = revoked_cert.serial_number(entry['serial_number'])
|
||||
revoked_cert = revoked_cert.revocation_date(entry['revocation_date'])
|
||||
if entry['issuer'] is not None:
|
||||
revoked_cert = revoked_cert.add_extension(
|
||||
x509.CertificateIssuer([
|
||||
crypto_utils.cryptography_get_name(name) for name in entry['issuer']
|
||||
]),
|
||||
entry['issuer_critical']
|
||||
)
|
||||
if entry['reason'] is not None:
|
||||
revoked_cert = revoked_cert.add_extension(
|
||||
x509.CRLReason(entry['reason']),
|
||||
entry['reason_critical']
|
||||
)
|
||||
if entry['invalidity_date'] is not None:
|
||||
revoked_cert = revoked_cert.add_extension(
|
||||
x509.InvalidityDate(entry['invalidity_date']),
|
||||
entry['invalidity_date_critical']
|
||||
)
|
||||
crl = crl.add_revoked_certificate(revoked_cert.build(backend))
|
||||
|
||||
self.crl = crl.sign(self.privatekey, self.digest, backend=backend)
|
||||
return self.crl.public_bytes(Encoding.PEM)
|
||||
|
||||
def generate(self):
|
||||
if not self.check(perms_required=False) or self.force:
|
||||
result = self._generate_crl()
|
||||
if self.return_content:
|
||||
self.crl_content = result
|
||||
if self.backup:
|
||||
self.backup_file = self.module.backup_local(self.path)
|
||||
crypto_utils.write_file(self.module, result)
|
||||
self.changed = True
|
||||
|
||||
file_args = self.module.load_file_common_arguments(self.module.params)
|
||||
if self.module.set_fs_attributes_if_different(file_args, False):
|
||||
self.changed = True
|
||||
|
||||
def _dump_revoked(self, entry):
|
||||
return {
|
||||
'serial_number': entry['serial_number'],
|
||||
'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT),
|
||||
'issuer':
|
||||
[crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']]
|
||||
if entry['issuer'] is not None else None,
|
||||
'issuer_critical': entry['issuer_critical'],
|
||||
'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None,
|
||||
'reason_critical': entry['reason_critical'],
|
||||
'invalidity_date':
|
||||
entry['invalidity_date'].strftime(TIMESTAMP_FORMAT)
|
||||
if entry['invalidity_date'] is not None else None,
|
||||
'invalidity_date_critical': entry['invalidity_date_critical'],
|
||||
}
|
||||
|
||||
def dump(self, check_mode=False):
|
||||
result = {
|
||||
'changed': self.changed,
|
||||
'filename': self.path,
|
||||
'privatekey': self.privatekey_path,
|
||||
'last_update': None,
|
||||
'next_update': None,
|
||||
'digest': None,
|
||||
'issuer_ordered': None,
|
||||
'issuer': None,
|
||||
'revoked_certificates': [],
|
||||
}
|
||||
if self.backup_file:
|
||||
result['backup_file'] = self.backup_file
|
||||
|
||||
if check_mode:
|
||||
result['last_update'] = self.last_update.strftime(TIMESTAMP_FORMAT)
|
||||
result['next_update'] = self.next_update.strftime(TIMESTAMP_FORMAT)
|
||||
# result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
|
||||
result['digest'] = self.module.params['digest']
|
||||
result['issuer_ordered'] = self.issuer
|
||||
result['issuer'] = {}
|
||||
for k, v in self.issuer:
|
||||
result['issuer'][k] = v
|
||||
result['revoked_certificates'] = []
|
||||
for entry in self.revoked_certificates:
|
||||
result['revoked_certificates'].append(self._dump_revoked(entry))
|
||||
elif self.crl:
|
||||
result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
|
||||
result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
|
||||
try:
|
||||
result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
|
||||
except AttributeError:
|
||||
# Older cryptography versions don't have signature_algorithm_oid yet
|
||||
dotted = crypto_utils._obj2txt(
|
||||
self.crl._backend._lib,
|
||||
self.crl._backend._ffi,
|
||||
self.crl._x509_crl.sig_alg.algorithm
|
||||
)
|
||||
oid = x509.oid.ObjectIdentifier(dotted)
|
||||
result['digest'] = crypto_utils.cryptography_oid_to_name(oid)
|
||||
issuer = []
|
||||
for attribute in self.crl.issuer:
|
||||
issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
|
||||
result['issuer_ordered'] = issuer
|
||||
result['issuer'] = {}
|
||||
for k, v in issuer:
|
||||
result['issuer'][k] = v
|
||||
result['revoked_certificates'] = []
|
||||
for cert in self.crl:
|
||||
entry = crypto_utils.cryptography_decode_revoked_certificate(cert)
|
||||
result['revoked_certificates'].append(self._dump_revoked(entry))
|
||||
|
||||
if self.return_content:
|
||||
result['crl'] = self.crl_content
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
mode=dict(type='str', default='generate', choices=['generate', 'update']),
|
||||
force=dict(type='bool', default=False),
|
||||
backup=dict(type='bool', default=False),
|
||||
path=dict(type='path', required=True),
|
||||
privatekey_path=dict(type='path'),
|
||||
privatekey_content=dict(type='str'),
|
||||
privatekey_passphrase=dict(type='str', no_log=True),
|
||||
issuer=dict(type='dict'),
|
||||
last_update=dict(type='str', default='+0s'),
|
||||
next_update=dict(type='str'),
|
||||
digest=dict(type='str', default='sha256'),
|
||||
ignore_timestamps=dict(type='bool', default=False),
|
||||
return_content=dict(type='bool', default=False),
|
||||
revoked_certificates=dict(
|
||||
type='list',
|
||||
elements='dict',
|
||||
options=dict(
|
||||
path=dict(type='path'),
|
||||
content=dict(type='str'),
|
||||
serial_number=dict(type='int'),
|
||||
revocation_date=dict(type='str', default='+0s'),
|
||||
issuer=dict(type='list', elements='str'),
|
||||
issuer_critical=dict(type='bool', default=False),
|
||||
reason=dict(
|
||||
type='str',
|
||||
choices=[
|
||||
'unspecified', 'key_compromise', 'ca_compromise', 'affiliation_changed',
|
||||
'superseded', 'cessation_of_operation', 'certificate_hold',
|
||||
'privilege_withdrawn', 'aa_compromise', 'remove_from_crl'
|
||||
]
|
||||
),
|
||||
reason_critical=dict(type='bool', default=False),
|
||||
invalidity_date=dict(type='str'),
|
||||
invalidity_date_critical=dict(type='bool', default=False),
|
||||
),
|
||||
required_one_of=[['path', 'content', 'serial_number']],
|
||||
mutually_exclusive=[['path', 'content', 'serial_number']],
|
||||
),
|
||||
),
|
||||
required_if=[
|
||||
('state', 'present', ['privatekey_path', 'privatekey_content'], True),
|
||||
('state', 'present', ['issuer', 'next_update', 'revoked_certificates'], False),
|
||||
],
|
||||
mutually_exclusive=(
|
||||
['privatekey_path', 'privatekey_content'],
|
||||
),
|
||||
supports_check_mode=True,
|
||||
add_file_common_args=True,
|
||||
)
|
||||
|
||||
if not CRYPTOGRAPHY_FOUND:
|
||||
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
|
||||
exception=CRYPTOGRAPHY_IMP_ERR)
|
||||
|
||||
try:
|
||||
crl = CRL(module)
|
||||
|
||||
if module.params['state'] == 'present':
|
||||
if module.check_mode:
|
||||
result = crl.dump(check_mode=True)
|
||||
result['changed'] = module.params['force'] or not crl.check()
|
||||
module.exit_json(**result)
|
||||
|
||||
crl.generate()
|
||||
else:
|
||||
if module.check_mode:
|
||||
result = crl.dump(check_mode=True)
|
||||
result['changed'] = os.path.exists(module.params['path'])
|
||||
module.exit_json(**result)
|
||||
|
||||
crl.remove()
|
||||
|
||||
result = crl.dump()
|
||||
module.exit_json(**result)
|
||||
except crypto_utils.OpenSSLObjectError as exc:
|
||||
module.fail_json(msg=to_native(exc))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -1,281 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2020, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: x509_crl_info
|
||||
version_added: "2.10"
|
||||
short_description: Retrieve information on Certificate Revocation Lists (CRLs)
|
||||
description:
|
||||
- This module allows one to retrieve information on Certificate Revocation Lists (CRLs).
|
||||
requirements:
|
||||
- cryptography >= 1.2
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
options:
|
||||
path:
|
||||
description:
|
||||
- Remote absolute path of the CRL file to read.
|
||||
- Either I(path) or I(content) must be specified, but not both.
|
||||
type: path
|
||||
content:
|
||||
description:
|
||||
- Content of the X.509 CRL in PEM format.
|
||||
- Either I(path) or I(content) must be specified, but not both.
|
||||
type: str
|
||||
|
||||
notes:
|
||||
- All timestamp values are provided in ASN.1 TIME format, i.e. following the C(YYYYMMDDHHMMSSZ) pattern.
|
||||
They are all in UTC.
|
||||
seealso:
|
||||
- module: x509_crl
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Get information on CRL
|
||||
x509_crl_info:
|
||||
path: /etc/ssl/my-ca.crl
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result }}"
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
issuer:
|
||||
description:
|
||||
- The CRL's issuer.
|
||||
- Note that for repeated values, only the last one will be returned.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
|
||||
issuer_ordered:
|
||||
description: The CRL's issuer as an ordered list of tuples.
|
||||
returned: success
|
||||
type: list
|
||||
elements: list
|
||||
sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
|
||||
last_update:
|
||||
description: The point in time from which this CRL can be trusted as ASN.1 TIME.
|
||||
returned: success
|
||||
type: str
|
||||
sample: 20190413202428Z
|
||||
next_update:
|
||||
description: The point in time by which a new CRL will have been issued and from which the client has to check for it, as ASN.1 TIME.
|
||||
returned: success
|
||||
type: str
|
||||
sample: 20190413202428Z
|
||||
digest:
|
||||
description: The signature algorithm used to sign the CRL.
|
||||
returned: success
|
||||
type: str
|
||||
sample: sha256WithRSAEncryption
|
||||
revoked_certificates:
|
||||
description: List of certificates to be revoked.
|
||||
returned: success
|
||||
type: list
|
||||
elements: dict
|
||||
contains:
|
||||
serial_number:
|
||||
description: Serial number of the certificate.
|
||||
type: int
|
||||
sample: 1234
|
||||
revocation_date:
|
||||
description: The point in time the certificate was revoked as ASN.1 TIME.
|
||||
type: str
|
||||
sample: 20190413202428Z
|
||||
issuer:
|
||||
description: The certificate's issuer.
|
||||
type: list
|
||||
elements: str
|
||||
sample: '["DNS:ca.example.org"]'
|
||||
issuer_critical:
|
||||
description: Whether the certificate issuer extension is critical.
|
||||
type: bool
|
||||
sample: no
|
||||
reason:
|
||||
description:
|
||||
- The value for the revocation reason extension.
|
||||
- One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
|
||||
C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
|
||||
C(remove_from_crl).
|
||||
type: str
|
||||
sample: key_compromise
|
||||
reason_critical:
|
||||
description: Whether the revocation reason extension is critical.
|
||||
type: bool
|
||||
sample: no
|
||||
invalidity_date:
|
||||
description: |
|
||||
The point in time it was known/suspected that the private key was compromised
|
||||
or that the certificate otherwise became invalid as ASN.1 TIME.
|
||||
type: str
|
||||
sample: 20190413202428Z
|
||||
invalidity_date_critical:
|
||||
description: Whether the invalidity date extension is critical.
|
||||
type: bool
|
||||
sample: no
|
||||
'''
|
||||
|
||||
|
||||
import traceback
|
||||
from ansible.module_utils.compat.version import LooseVersion
|
||||
|
||||
from ansible.module_utils import crypto as crypto_utils
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
|
||||
|
||||
CRYPTOGRAPHY_IMP_ERR = None
|
||||
try:
|
||||
import cryptography
|
||||
from cryptography import x509
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
|
||||
except ImportError:
|
||||
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
|
||||
CRYPTOGRAPHY_FOUND = False
|
||||
else:
|
||||
CRYPTOGRAPHY_FOUND = True
|
||||
|
||||
|
||||
TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ"
|
||||
|
||||
|
||||
class CRLError(crypto_utils.OpenSSLObjectError):
|
||||
pass
|
||||
|
||||
|
||||
class CRLInfo(crypto_utils.OpenSSLObject):
|
||||
"""The main module implementation."""
|
||||
|
||||
def __init__(self, module):
|
||||
super(CRLInfo, self).__init__(
|
||||
module.params['path'] or '',
|
||||
'present',
|
||||
False,
|
||||
module.check_mode
|
||||
)
|
||||
|
||||
self.content = module.params['content']
|
||||
|
||||
self.module = module
|
||||
|
||||
self.crl = None
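# Read the CRL either from the given path or from the inline content, then parse the PEM data.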
|
||||
if self.content is None:
|
||||
try:
|
||||
with open(self.path, 'rb') as f:
|
||||
data = f.read()
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Error while reading CRL file from disk: {0}'.format(e))
|
||||
else:
|
||||
data = self.content.encode('utf-8')
|
||||
|
||||
try:
|
||||
self.crl = x509.load_pem_x509_crl(data, default_backend())
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Error while decoding CRL: {0}'.format(e))
|
||||
|
||||
def _dump_revoked(self, entry):
|
||||
return {
|
||||
'serial_number': entry['serial_number'],
|
||||
'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT),
|
||||
'issuer':
|
||||
[crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']]
|
||||
if entry['issuer'] is not None else None,
|
||||
'issuer_critical': entry['issuer_critical'],
|
||||
'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None,
|
||||
'reason_critical': entry['reason_critical'],
|
||||
'invalidity_date':
|
||||
entry['invalidity_date'].strftime(TIMESTAMP_FORMAT)
|
||||
if entry['invalidity_date'] is not None else None,
|
||||
'invalidity_date_critical': entry['invalidity_date_critical'],
|
||||
}
|
||||
|
||||
def get_info(self):
|
||||
result = {
|
||||
'changed': False,
|
||||
'last_update': None,
|
||||
'next_update': None,
|
||||
'digest': None,
|
||||
'issuer_ordered': None,
|
||||
'issuer': None,
|
||||
'revoked_certificates': [],
|
||||
}
|
||||
|
||||
result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
|
||||
result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
|
||||
try:
|
||||
result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
|
||||
except AttributeError:
|
||||
# Older cryptography versions don't have signature_algorithm_oid yet
|
||||
dotted = crypto_utils._obj2txt(
|
||||
self.crl._backend._lib,
|
||||
self.crl._backend._ffi,
|
||||
self.crl._x509_crl.sig_alg.algorithm
|
||||
)
|
||||
oid = x509.oid.ObjectIdentifier(dotted)
|
||||
result['digest'] = crypto_utils.cryptography_oid_to_name(oid)
|
||||
issuer = []
|
||||
for attribute in self.crl.issuer:
|
||||
issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
|
||||
result['issuer_ordered'] = issuer
|
||||
result['issuer'] = {}
|
||||
for k, v in issuer:
|
||||
result['issuer'][k] = v
|
||||
result['revoked_certificates'] = []
|
||||
for cert in self.crl:
|
||||
entry = crypto_utils.cryptography_decode_revoked_certificate(cert)
|
||||
result['revoked_certificates'].append(self._dump_revoked(entry))
|
||||
|
||||
return result
|
||||
|
||||
def generate(self):
|
||||
# Empty method because crypto_utils.OpenSSLObject wants this
|
||||
pass
|
||||
|
||||
def dump(self):
|
||||
# Empty method because crypto_utils.OpenSSLObject wants this
|
||||
pass
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
path=dict(type='path'),
|
||||
content=dict(type='str'),
|
||||
),
|
||||
required_one_of=(
|
||||
['path', 'content'],
|
||||
),
|
||||
mutually_exclusive=(
|
||||
['path', 'content'],
|
||||
),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
if not CRYPTOGRAPHY_FOUND:
|
||||
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
|
||||
exception=CRYPTOGRAPHY_IMP_ERR)
|
||||
|
||||
try:
|
||||
crl = CRLInfo(module)
|
||||
result = crl.get_info()
|
||||
module.exit_json(**result)
|
||||
except crypto_utils.OpenSSLObjectError as e:
|
||||
module.fail_json(msg=to_native(e))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|