mirror of https://github.com/ansible/ansible.git
Merge remote-tracking branch 'upstream/devel' into devel
commit
a90f5f6ba9
@ -0,0 +1,228 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
---
|
||||||
|
module: cloudtrail
|
||||||
|
short_description: manage CloudTrail creation and deletion
|
||||||
|
description:
|
||||||
|
- Creates or deletes CloudTrail configuration. Ensures logging is also enabled. This module has a dependency on python-boto >= 2.21.
|
||||||
|
version_added: "2.0"
|
||||||
|
author: Ted Timmons
|
||||||
|
options:
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- add or remove CloudTrail configuration.
|
||||||
|
required: true
|
||||||
|
choices: ['enabled', 'disabled']
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- name for given CloudTrail configuration.
|
||||||
|
- This is a primary key and is used to identify the configuration.
|
||||||
|
  s3_bucket_name:
|
||||||
|
description:
|
||||||
|
- bucket to place CloudTrail in.
|
||||||
|
- this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
|
||||||
|
- required when state=enabled.
|
||||||
|
required: false
|
||||||
|
s3_key_prefix:
|
||||||
|
description:
|
||||||
|
- prefix to keys in bucket. A trailing slash is not necessary and will be removed.
|
||||||
|
required: false
|
||||||
|
include_global_events:
|
||||||
|
description:
|
||||||
|
- record API calls from global services such as IAM and STS?
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
choices: ["true", "false"]
|
||||||
|
|
||||||
|
aws_secret_key:
|
||||||
|
description:
|
||||||
|
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
aliases: [ 'ec2_secret_key', 'secret_key' ]
|
||||||
|
version_added: "1.5"
|
||||||
|
aws_access_key:
|
||||||
|
description:
|
||||||
|
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
aliases: [ 'ec2_access_key', 'access_key' ]
|
||||||
|
version_added: "1.5"
|
||||||
|
region:
|
||||||
|
description:
|
||||||
|
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
|
||||||
|
required: false
|
||||||
|
aliases: ['aws_region', 'ec2_region']
|
||||||
|
version_added: "1.5"
|
||||||
|
|
||||||
|
extends_documentation_fragment: aws
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = """
|
||||||
|
- name: enable cloudtrail
|
||||||
|
local_action: cloudtrail
|
||||||
|
state=enabled name=main s3_bucket_name=ourbucket
|
||||||
|
s3_key_prefix=cloudtrail region=us-east-1
|
||||||
|
|
||||||
|
- name: enable cloudtrail with different configuration
|
||||||
|
local_action: cloudtrail
|
||||||
|
state=enabled name=main s3_bucket_name=ourbucket2
|
||||||
|
s3_key_prefix='' region=us-east-1
|
||||||
|
|
||||||
|
- name: remove cloudtrail
|
||||||
|
    local_action: cloudtrail state=disabled name=main region=us-east-1
|
||||||
|
"""
|
||||||
|
|
||||||
|
import time
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
from collections import Counter
|
||||||
|
|
||||||
|
boto_import_failed = False
|
||||||
|
try:
|
||||||
|
import boto
|
||||||
|
import boto.cloudtrail
|
||||||
|
from boto.regioninfo import RegionInfo
|
||||||
|
except ImportError:
|
||||||
|
boto_import_failed = True
|
||||||
|
|
||||||
|
class CloudTrailManager:
    """Handles CloudTrail configuration through a boto cloudtrail connection.

    Thin wrapper around the boto CloudTrail API; every method operates on the
    connection established in ``__init__`` and lets boto exceptions propagate
    (callers are expected to handle/report them).
    """

    def __init__(self, module, region=None, **aws_connect_params):
        self.module = module
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.changed = False

        try:
            self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params)
        # NOTE: `except X as e` replaces the Python-2-only `except X, e`
        # form, which is a SyntaxError on Python 3.
        except boto.exception.NoAuthHandlerFound as e:
            # No usable AWS credentials: report through the module and exit.
            self.module.fail_json(msg=str(e))

    def view_status(self, name):
        """Return the status dict (e.g. ``IsLogging``) for the named trail."""
        return self.conn.get_trail_status(name)

    def view(self, name):
        """Return the configuration dict for trail ``name``, or None if absent."""
        ret = self.conn.describe_trails(trail_name_list=[name])
        trail_list = ret.get('trailList', [])
        if len(trail_list) == 1:
            return trail_list[0]
        return None

    def exists(self, name=None):
        """Return True if a trail with the given name exists."""
        return bool(self.view(name))

    def enable_logging(self, name):
        """Turn on logging for a cloudtrail that already exists. Throws Exception on error."""
        self.conn.start_logging(name)

    def enable(self, **create_args):
        """Create a new trail; returns the raw API response."""
        return self.conn.create_trail(**create_args)

    def update(self, **create_args):
        """Update an existing trail; returns the raw API response."""
        return self.conn.update_trail(**create_args)

    def delete(self, name):
        """Delete a given cloudtrail configuration. Throws Exception on error."""
        self.conn.delete_trail(name)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Entry point: create, update, or delete a CloudTrail configuration."""

    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['enabled', 'disabled']},
        name={'required': True, 'type': 'str'},
        s3_bucket_name={'required': False, 'type': 'str'},
        s3_key_prefix={'default': '', 'required': False, 'type': 'str'},
        include_global_events={'default': True, 'required': False, 'type': 'bool'},
    ))

    # required_together expects a list of groups (list of lists); the original
    # bare `(['state', 's3_bucket_name'])` is just a flat list of strings.
    required_together = [['state', 's3_bucket_name']]

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)

    # The boto import is attempted at file load; only fail once `module`
    # exists so the error is reported through AnsibleModule. (The original
    # checked an undefined `has_libcloud` name before `module` was created.)
    if boto_import_failed:
        module.fail_json(msg='boto is required.')

    ec2_url, access_key, secret_key, region = get_ec2_creds(module)
    aws_connect_params = dict(aws_access_key_id=access_key,
                              aws_secret_access_key=secret_key)

    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    ct_name = module.params['name']
    s3_bucket_name = module.params['s3_bucket_name']
    # remove trailing slash from the key prefix, really messes up the key structure.
    s3_key_prefix = module.params['s3_key_prefix'].rstrip('/')
    include_global_events = module.params['include_global_events']

    cf_man = CloudTrailManager(module, region=region, **aws_connect_params)

    results = {'changed': False}
    if module.params['state'] == 'enabled':
        results['exists'] = cf_man.exists(name=ct_name)
        if results['exists']:
            results['view'] = cf_man.view(ct_name)
            # only update if the values have changed.
            if results['view']['S3BucketName'] != s3_bucket_name or \
               results['view']['S3KeyPrefix'] != s3_key_prefix or \
               results['view']['IncludeGlobalServiceEvents'] != include_global_events:
                if not module.check_mode:
                    results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
                results['changed'] = True
        else:
            if not module.check_mode:
                # doesn't exist. create it.
                results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
            results['changed'] = True

        # given cloudtrail should exist now. Enable the logging.
        results['view_status'] = cf_man.view_status(ct_name)
        results['was_logging_enabled'] = results['view_status'].get('IsLogging', False)
        if not results['was_logging_enabled']:
            if not module.check_mode:
                cf_man.enable_logging(ct_name)
                results['logging_enabled'] = True
            results['changed'] = True

    # delete the cloudtrail
    elif module.params['state'] == 'disabled':
        # check to see if it exists before deleting.
        results['exists'] = cf_man.exists(name=ct_name)
        if results['exists']:
            # it exists, so we should delete it and mark changed.
            if not module.check_mode:
                cf_man.delete(ct_name)
            results['changed'] = True

    module.exit_json(**results)


# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
|
@ -0,0 +1,232 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# (c) 2015, René Moser <mail@renemoser.net>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: cs_affinitygroup
|
||||||
|
short_description: Manages affinity groups on Apache CloudStack based clouds.
|
||||||
|
description:
|
||||||
|
- Create and remove affinity groups.
|
||||||
|
version_added: '2.0'
|
||||||
|
author: René Moser
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Name of the affinity group.
|
||||||
|
required: true
|
||||||
|
affinty_type:
|
||||||
|
description:
|
||||||
|
- Type of the affinity group. If not specified, first found affinity type is used.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
description:
|
||||||
|
description:
|
||||||
|
- Description of the affinity group.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- State of the affinity group.
|
||||||
|
required: false
|
||||||
|
default: 'present'
|
||||||
|
choices: [ 'present', 'absent' ]
|
||||||
|
poll_async:
|
||||||
|
description:
|
||||||
|
- Poll async jobs until job has finished.
|
||||||
|
required: false
|
||||||
|
default: true
|
||||||
|
extends_documentation_fragment: cloudstack
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
---
|
||||||
|
# Create a affinity group
|
||||||
|
- local_action:
|
||||||
|
module: cs_affinitygroup
|
||||||
|
name: haproxy
|
||||||
|
affinty_type: host anti-affinity
|
||||||
|
|
||||||
|
|
||||||
|
# Remove a affinity group
|
||||||
|
- local_action:
|
||||||
|
module: cs_affinitygroup
|
||||||
|
name: haproxy
|
||||||
|
state: absent
|
||||||
|
'''
|
||||||
|
|
||||||
|
RETURN = '''
|
||||||
|
---
|
||||||
|
name:
|
||||||
|
description: Name of affinity group.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: app
|
||||||
|
description:
|
||||||
|
description: Description of affinity group.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: application affinity group
|
||||||
|
affinity_type:
|
||||||
|
description: Type of affinity group.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: host anti-affinity
|
||||||
|
'''
|
||||||
|
|
||||||
|
try:
|
||||||
|
from cs import CloudStack, CloudStackException, read_config
|
||||||
|
has_lib_cs = True
|
||||||
|
except ImportError:
|
||||||
|
has_lib_cs = False
|
||||||
|
|
||||||
|
# import cloudstack common
|
||||||
|
from ansible.module_utils.cloudstack import *
|
||||||
|
|
||||||
|
|
||||||
|
class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
    """CloudStack affinity group management on top of the common AnsibleCloudStack base."""

    def __init__(self, module):
        AnsibleCloudStack.__init__(self, module)
        self.result = {
            'changed': False,
        }
        self.affinity_group = None

    def get_affinity_group(self):
        """Look up (and cache) the affinity group named in the module params; None if absent."""
        if self.affinity_group:
            return self.affinity_group

        wanted_name = self.module.params.get('name')
        response = self.cs.listAffinityGroups()
        if response:
            for group in response['affinitygroup']:
                if group['name'] == wanted_name:
                    self.affinity_group = group
                    break
        return self.affinity_group

    def get_affinity_type(self):
        """Resolve the affinity group type, defaulting to the first type the API reports.

        Fails the module when the requested type is not offered by the cloud.
        (The param name 'affinty_type' keeps the module's existing spelling.)
        """
        requested = self.module.params.get('affinty_type')

        type_response = self.cs.listAffinityGroupTypes()
        if type_response:
            available = type_response['affinityGroupType']
            if not requested:
                return available[0]['type']

            for entry in available:
                if entry['type'] == requested:
                    return entry['type']
        self.module.fail_json(msg="affinity group type '%s' not found" % requested)

    def create_affinity_group(self):
        """Create the affinity group if it does not already exist; honors check mode."""
        affinity_group = self.get_affinity_group()
        if not affinity_group:
            self.result['changed'] = True

            args = {
                'name': self.module.params.get('name'),
                'type': self.get_affinity_type(),
                'description': self.module.params.get('description'),
            }

            if not self.module.check_mode:
                res = self.cs.createAffinityGroup(**args)

                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

                poll_async = self.module.params.get('poll_async')
                if res and poll_async:
                    affinity_group = self._poll_job(res, 'affinitygroup')
        return affinity_group

    def remove_affinity_group(self):
        """Delete the affinity group if it exists; honors check mode."""
        affinity_group = self.get_affinity_group()
        if affinity_group:
            self.result['changed'] = True

            args = {
                'name': self.module.params.get('name'),
            }

            if not self.module.check_mode:
                res = self.cs.deleteAffinityGroup(**args)

                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

                poll_async = self.module.params.get('poll_async')
                if res and poll_async:
                    res = self._poll_job(res, 'affinitygroup')
        return affinity_group

    def get_result(self, affinity_group):
        """Copy selected affinity group fields into the module result dict and return it."""
        if affinity_group:
            if 'name' in affinity_group:
                self.result['name'] = affinity_group['name']
            if 'description' in affinity_group:
                self.result['description'] = affinity_group['description']
            if 'type' in affinity_group:
                self.result['affinity_type'] = affinity_group['type']
        return self.result
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Entry point: ensure a CloudStack affinity group is present or absent."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            affinty_type=dict(default=None),
            description=dict(default=None),
            state=dict(choices=['present', 'absent'], default='present'),
            poll_async=dict(choices=BOOLEANS, default=True),
            api_key=dict(default=None),
            api_secret=dict(default=None),
            api_url=dict(default=None),
            api_http_method=dict(default='get'),
        ),
        supports_check_mode=True
    )

    # `cs` import is attempted at file load; report its absence via the module.
    if not has_lib_cs:
        module.fail_json(msg="python library cs required: pip install cs")

    try:
        acs_ag = AnsibleCloudStackAffinityGroup(module)

        state = module.params.get('state')
        if state in ['absent']:
            affinity_group = acs_ag.remove_affinity_group()
        else:
            affinity_group = acs_ag.create_affinity_group()

        result = acs_ag.get_result(affinity_group)

    except CloudStackException as e:
        # `except X as e` is valid on Python 2.6+ and Python 3; the original
        # `except X, e` form is a SyntaxError on Python 3.
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)


# import module snippets
from ansible.module_utils.basic import *
main()
|
@ -0,0 +1,266 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# (c) 2015, René Moser <mail@renemoser.net>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
module: cs_firewall
|
||||||
|
short_description: Manages firewall rules on Apache CloudStack based clouds.
|
||||||
|
description:
|
||||||
|
- Creates and removes firewall rules.
|
||||||
|
version_added: '2.0'
|
||||||
|
author: René Moser
|
||||||
|
options:
|
||||||
|
ip_address:
|
||||||
|
description:
|
||||||
|
- Public IP address the rule is assigned to.
|
||||||
|
required: true
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- State of the firewall rule.
|
||||||
|
required: false
|
||||||
|
default: 'present'
|
||||||
|
choices: [ 'present', 'absent' ]
|
||||||
|
protocol:
|
||||||
|
description:
|
||||||
|
- Protocol of the firewall rule.
|
||||||
|
required: false
|
||||||
|
default: 'tcp'
|
||||||
|
choices: [ 'tcp', 'udp', 'icmp' ]
|
||||||
|
cidr:
|
||||||
|
description:
|
||||||
|
- CIDR (full notation) to be used for firewall rule.
|
||||||
|
required: false
|
||||||
|
default: '0.0.0.0/0'
|
||||||
|
start_port:
|
||||||
|
description:
|
||||||
|
- Start port for this rule. Considered if C(protocol=tcp) or C(protocol=udp).
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
end_port:
|
||||||
|
description:
|
||||||
|
- End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp).
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
icmp_type:
|
||||||
|
description:
|
||||||
|
- Type of the icmp message being sent. Considered if C(protocol=icmp).
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
icmp_code:
|
||||||
|
description:
|
||||||
|
- Error code for this icmp message. Considered if C(protocol=icmp).
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
project:
|
||||||
|
description:
|
||||||
|
- Name of the project.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
extends_documentation_fragment: cloudstack
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
---
|
||||||
|
# Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1
|
||||||
|
- local_action:
|
||||||
|
module: cs_firewall
|
||||||
|
ip_address: 4.3.2.1
|
||||||
|
start_port: 80
|
||||||
|
end_port: 80
|
||||||
|
cidr: 1.2.3.4/32
|
||||||
|
|
||||||
|
|
||||||
|
# Allow inbound tcp/udp port 53 to 4.3.2.1
|
||||||
|
- local_action:
|
||||||
|
module: cs_firewall
|
||||||
|
ip_address: 4.3.2.1
|
||||||
|
start_port: 53
|
||||||
|
end_port: 53
|
||||||
|
protocol: '{{ item }}'
|
||||||
|
with_items:
|
||||||
|
- tcp
|
||||||
|
- udp
|
||||||
|
|
||||||
|
|
||||||
|
# Ensure firewall rule is removed
|
||||||
|
- local_action:
|
||||||
|
module: cs_firewall
|
||||||
|
ip_address: 4.3.2.1
|
||||||
|
start_port: 8000
|
||||||
|
end_port: 8888
|
||||||
|
cidr: 17.0.0.0/8
|
||||||
|
state: absent
|
||||||
|
'''
|
||||||
|
|
||||||
|
try:
|
||||||
|
from cs import CloudStack, CloudStackException, read_config
|
||||||
|
has_lib_cs = True
|
||||||
|
except ImportError:
|
||||||
|
has_lib_cs = False
|
||||||
|
|
||||||
|
# import cloudstack common
|
||||||
|
from ansible.module_utils.cloudstack import *
|
||||||
|
|
||||||
|
|
||||||
|
class AnsibleCloudStackFirewall(AnsibleCloudStack):
    """CloudStack firewall rule management on top of the common AnsibleCloudStack base."""

    def __init__(self, module):
        AnsibleCloudStack.__init__(self, module)
        self.result = {
            'changed': False,
        }
        self.firewall_rule = None

    def get_firewall_rule(self):
        """Find (and cache) the existing firewall rule matching the module params; None if absent."""
        if self.firewall_rule:
            return self.firewall_rule

        cidr = self.module.params.get('cidr')
        protocol = self.module.params.get('protocol')
        start_port = self.module.params.get('start_port')
        end_port = self.module.params.get('end_port')
        icmp_code = self.module.params.get('icmp_code')
        icmp_type = self.module.params.get('icmp_type')

        # Parameter sanity checks per protocol.
        if protocol in ['tcp', 'udp'] and not (start_port and end_port):
            self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol)

        if protocol == 'icmp' and not icmp_type:
            self.module.fail_json(msg="no icmp_type set")

        list_args = {
            'ipaddressid': self.get_ip_address_id(),
            'projectid': self.get_project_id(),
        }

        existing = self.cs.listFirewallRules(**list_args)
        if existing and 'firewallrule' in existing:
            for rule in existing['firewallrule']:
                cidr_matches = self._type_cidr_match(rule, cidr)
                protocol_matches = self._tcp_udp_match(rule, protocol, start_port, end_port) \
                    or self._icmp_match(rule, protocol, icmp_code, icmp_type)
                if cidr_matches and protocol_matches:
                    self.firewall_rule = rule
                    break
        return self.firewall_rule

    def _tcp_udp_match(self, rule, protocol, start_port, end_port):
        """True when the rule is a tcp/udp rule with the same protocol and port range."""
        if protocol not in ['tcp', 'udp']:
            return False
        if protocol != rule['protocol']:
            return False
        return start_port == int(rule['startport']) and end_port == int(rule['endport'])

    def _icmp_match(self, rule, protocol, icmp_code, icmp_type):
        """True when the rule is an icmp rule with the same code and type."""
        if protocol != 'icmp':
            return False
        if protocol != rule['protocol']:
            return False
        return icmp_code == rule['icmpcode'] and icmp_type == rule['icmptype']

    def _type_cidr_match(self, rule, cidr):
        """True when the rule's CIDR list equals the requested CIDR."""
        return cidr == rule['cidrlist']

    def create_firewall_rule(self):
        """Create the firewall rule if it does not already exist; honors check mode."""
        firewall_rule = self.get_firewall_rule()
        if not firewall_rule:
            self.result['changed'] = True
            create_args = {
                'cidrlist': self.module.params.get('cidr'),
                'protocol': self.module.params.get('protocol'),
                'startport': self.module.params.get('start_port'),
                'endport': self.module.params.get('end_port'),
                'icmptype': self.module.params.get('icmp_type'),
                'icmpcode': self.module.params.get('icmp_code'),
                'ipaddressid': self.get_ip_address_id(),
            }

            if not self.module.check_mode:
                firewall_rule = self.cs.createFirewallRule(**create_args)

        return firewall_rule

    def remove_firewall_rule(self):
        """Delete the firewall rule if it exists; honors check mode."""
        firewall_rule = self.get_firewall_rule()
        if firewall_rule:
            self.result['changed'] = True
            delete_args = {
                'id': firewall_rule['id'],
            }

            if not self.module.check_mode:
                res = self.cs.deleteFirewallRule(**delete_args)

        return firewall_rule

    def get_result(self, firewall_rule):
        """Return the accumulated module result dict."""
        return self.result
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Entry point: ensure a CloudStack firewall rule is present or absent."""
    module = AnsibleModule(
        argument_spec=dict(
            # `default=None` alongside `required=True` was meaningless and has
            # been dropped; the parameter must always be supplied.
            ip_address=dict(required=True),
            cidr=dict(default='0.0.0.0/0'),
            protocol=dict(choices=['tcp', 'udp', 'icmp'], default='tcp'),
            icmp_type=dict(type='int', default=None),
            icmp_code=dict(type='int', default=None),
            start_port=dict(type='int', default=None),
            end_port=dict(type='int', default=None),
            state=dict(choices=['present', 'absent'], default='present'),
            project=dict(default=None),
            api_key=dict(default=None),
            api_secret=dict(default=None),
            api_url=dict(default=None),
            api_http_method=dict(default='get'),
        ),
        required_together=(
            ['start_port', 'end_port'],
        ),
        mutually_exclusive=(
            ['icmp_type', 'start_port'],
            ['icmp_type', 'end_port'],
        ),
        supports_check_mode=True
    )

    # `cs` import is attempted at file load; report its absence via the module.
    if not has_lib_cs:
        module.fail_json(msg="python library cs required: pip install cs")

    try:
        acs_fw = AnsibleCloudStackFirewall(module)

        state = module.params.get('state')
        if state in ['absent']:
            fw_rule = acs_fw.remove_firewall_rule()
        else:
            fw_rule = acs_fw.create_firewall_rule()

        result = acs_fw.get_result(fw_rule)

    except CloudStackException as e:
        # `except X as e` is valid on Python 2.6+ and Python 3; the original
        # `except X, e` form is a SyntaxError on Python 3.
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)


# import module snippets
from ansible.module_utils.basic import *
main()
|
@ -0,0 +1,788 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# (c) 2015, René Moser <mail@renemoser.net>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
---
module: cs_instance
short_description: Manages instances and virtual machines on Apache CloudStack based clouds.
description:
  - Deploy, start, restart, stop and destroy instances on Apache CloudStack, Citrix CloudPlatform and Exoscale.
version_added: '2.0'
author: René Moser
options:
  name:
    description:
      - Host name of the instance. C(name) can only contain ASCII letters.
    required: true
  display_name:
    description:
      - Custom display name of the instances.
    required: false
    default: null
  group:
    description:
      - Group in where the new instance should be in.
    required: false
    default: null
  state:
    description:
      - State of the instance.
    required: false
    default: 'present'
    choices: [ 'deployed', 'started', 'stopped', 'restarted', 'destroyed', 'expunged', 'present', 'absent' ]
  service_offering:
    description:
      - Name or id of the service offering of the new instance. If not set, first found service offering is used.
    required: false
    default: null
  template:
    description:
      - Name or id of the template to be used for creating the new instance. Required when using C(state=present). Mutually exclusive with C(ISO) option.
    required: false
    default: null
  iso:
    description:
      - Name or id of the ISO to be used for creating the new instance. Required when using C(state=present). Mutually exclusive with C(template) option.
    required: false
    default: null
  hypervisor:
    description:
      - Name the hypervisor to be used for creating the new instance. Relevant when using C(state=present) and option C(ISO) is used. If not set, first found hypervisor will be used.
    required: false
    default: null
    choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
  keyboard:
    description:
      - Keyboard device type for the instance.
    required: false
    default: null
    choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ]
  networks:
    description:
      - List of networks to use for the new instance.
    required: false
    default: []
    aliases: [ 'network' ]
  ip_address:
    description:
      - IPv4 address for default instance's network during creation
    required: false
    default: null
  ip6_address:
    description:
      - IPv6 address for default instance's network.
    required: false
    default: null
  disk_offering:
    description:
      - Name of the disk offering to be used.
    required: false
    default: null
  disk_size:
    description:
      - Disk size in GByte required if deploying instance from ISO.
    required: false
    default: null
  security_groups:
    description:
      - List of security groups the instance to be applied to.
    required: false
    default: []
    aliases: [ 'security_group' ]
  project:
    description:
      - Name of the project the instance to be deployed in.
    required: false
    default: null
  zone:
    description:
      - Name of the zone in which the instance should be deployed. If not set, default zone is used.
    required: false
    default: null
  ssh_key:
    description:
      - Name of the SSH key to be deployed on the new instance.
    required: false
    default: null
  affinity_groups:
    description:
      - Affinity groups names to be applied to the new instance.
    required: false
    default: []
    aliases: [ 'affinity_group' ]
  user_data:
    description:
      - Optional data (ASCII) that can be sent to the instance upon a successful deployment.
      - The data will be automatically base64 encoded.
      - Consider switching to HTTP_POST by using C(CLOUDSTACK_METHOD=post) to increase the HTTP_GET size limit of 2KB to 32 KB.
    required: false
    default: null
  force:
    description:
      - Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed.
    required: false
    default: false
  tags:
    description:
      - List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
      - "If you want to delete all tags, set a empty list e.g. C(tags: [])."
    required: false
    default: null
  poll_async:
    description:
      - Poll async jobs until job has finished.
    required: false
    default: true
extends_documentation_fragment: cloudstack
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
---
# Create a instance on CloudStack from an ISO
# NOTE: Names of offerings and ISOs depending on the CloudStack configuration.
- local_action:
    module: cs_instance
    name: web-vm-1
    iso: Linux Debian 7 64-bit
    hypervisor: VMware
    project: Integration
    zone: ch-zrh-ix-01
    service_offering: 1cpu_1gb
    disk_offering: PerfPlus Storage
    disk_size: 20
    networks:
      - Server Integration
      - Sync Integration
      - Storage Integration


# For changing a running instance, use the 'force' parameter
- local_action:
    module: cs_instance
    name: web-vm-1
    display_name: web-vm-01.example.com
    iso: Linux Debian 7 64-bit
    service_offering: 2cpu_2gb
    force: yes


# Create or update a instance on Exoscale's public cloud
- local_action:
    module: cs_instance
    name: web-vm-1
    template: Linux Debian 7 64-bit
    service_offering: Tiny
    ssh_key: john@example.com
    tags:
      - { key: admin, value: john }
      - { key: foo, value: bar }
  register: vm

- debug: msg='default ip {{ vm.default_ip }} and is in state {{ vm.state }}'


# Ensure a instance has stopped
- local_action: cs_instance name=web-vm-1 state=stopped


# Ensure a instance is running
- local_action: cs_instance name=web-vm-1 state=started


# Remove a instance
- local_action: cs_instance name=web-vm-1 state=absent
'''
|
||||||
|
|
||||||
|
RETURN = '''
---
id:
  description: ID of the instance.
  returned: success
  type: string
  sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
  description: Name of the instance.
  returned: success
  type: string
  sample: web-01
display_name:
  description: Display name of the instance.
  returned: success
  type: string
  sample: web-01
group:
  description: Group name of the instance is related.
  returned: success
  type: string
  sample: web
created:
  description: Date of the instance was created.
  returned: success
  type: string
  sample: 2014-12-01T14:57:57+0100
password_enabled:
  description: True if password setting is enabled.
  returned: success
  type: boolean
  sample: true
password:
  description: The password of the instance if exists.
  returned: success
  type: string
  sample: Ge2oe7Do
ssh_key:
  description: Name of ssh key deployed to instance.
  returned: success
  type: string
  sample: key@work
project:
  description: Name of project the instance is related to.
  returned: success
  type: string
  sample: Production
default_ip:
  description: Default IP address of the instance.
  returned: success
  type: string
  sample: 10.23.37.42
public_ip:
  description: Public IP address with instance via static nat rule.
  returned: success
  type: string
  sample: 1.2.3.4
iso:
  description: Name of ISO the instance was deployed with.
  returned: success
  type: string
  sample: Debian-8-64bit
template:
  description: Name of template the instance was deployed with.
  returned: success
  type: string
  sample: Debian-8-64bit
service_offering:
  description: Name of the service offering the instance has.
  returned: success
  type: string
  sample: 2cpu_2gb
zone:
  description: Name of zone the instance is in.
  returned: success
  type: string
  sample: ch-gva-2
state:
  description: State of the instance.
  returned: success
  type: string
  sample: Running
security_groups:
  description: Security groups the instance is in.
  returned: success
  type: list
  sample: '[ "default" ]'
affinity_groups:
  description: Affinity groups the instance is in.
  returned: success
  type: list
  sample: '[ "webservers" ]'
tags:
  description: List of resource tags associated with the instance.
  returned: success
  type: dict
  sample: '[ { "key": "foo", "value": "bar" } ]'
'''
|
||||||
|
|
||||||
|
import base64

# The 'cs' library is an optional third-party dependency; availability is
# checked in main() so the module can fail with a helpful message.
try:
    from cs import CloudStack, CloudStackException, read_config
    has_lib_cs = True
except ImportError:
    has_lib_cs = False

# import cloudstack common
from ansible.module_utils.cloudstack import *
|
||||||
|
|
||||||
|
|
||||||
|
class AnsibleCloudStackInstance(AnsibleCloudStack):
    """Manage instances (virtual machines) on Apache CloudStack based clouds.

    Implements deploy, update, start, stop, restart, destroy and expunge
    operations on top of the common AnsibleCloudStack helper.
    """

    def __init__(self, module):
        AnsibleCloudStack.__init__(self, module)
        # Lazily populated cache of the instance dict returned by the API.
        self.instance = None


    def get_service_offering_id(self):
        """Return the id of the requested service offering.

        Falls back to the first offering found when none was requested.
        Fails the module when the requested offering does not exist.
        """
        service_offering = self.module.params.get('service_offering')

        service_offerings = self.cs.listServiceOfferings()
        if service_offerings:
            if not service_offering:
                return service_offerings['serviceoffering'][0]['id']

            for s in service_offerings['serviceoffering']:
                if service_offering in [ s['name'], s['id'] ]:
                    return s['id']
        self.module.fail_json(msg="Service offering '%s' not found" % service_offering)


    def get_template_or_iso_id(self):
        """Return the id of the template or ISO to deploy from.

        Exactly one of C(template) or C(iso) must be given; fails the
        module otherwise, or when the given name/id cannot be resolved.
        """
        template = self.module.params.get('template')
        iso = self.module.params.get('iso')

        if not template and not iso:
            self.module.fail_json(msg="Template or ISO is required.")

        if template and iso:
            # Fixed garbled message wording ("Template are ISO are ...").
            self.module.fail_json(msg="Template and ISO are mutually exclusive.")

        if template:
            templates = self.cs.listTemplates(templatefilter='executable')
            if templates:
                for t in templates['template']:
                    if template in [ t['displaytext'], t['name'], t['id'] ]:
                        return t['id']
            self.module.fail_json(msg="Template '%s' not found" % template)

        elif iso:
            isos = self.cs.listIsos()
            if isos:
                for i in isos['iso']:
                    if iso in [ i['displaytext'], i['name'], i['id'] ]:
                        return i['id']
            self.module.fail_json(msg="ISO '%s' not found" % iso)


    def get_disk_offering_id(self):
        """Return the id of the requested disk offering, or None if unset."""
        disk_offering = self.module.params.get('disk_offering')

        if not disk_offering:
            return None

        disk_offerings = self.cs.listDiskOfferings()
        if disk_offerings:
            for d in disk_offerings['diskoffering']:
                if disk_offering in [ d['displaytext'], d['name'], d['id'] ]:
                    return d['id']
        self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering)


    def get_instance(self):
        """Return the instance dict matching C(name), cached after first lookup.

        Matches against name, display name and id within the configured
        project/zone. Returns None when no instance matches.
        """
        instance = self.instance
        if not instance:
            instance_name = self.module.params.get('name')

            args = {}
            args['projectid'] = self.get_project_id()
            args['zoneid'] = self.get_zone_id()
            instances = self.cs.listVirtualMachines(**args)
            if instances:
                for v in instances['virtualmachine']:
                    if instance_name in [ v['name'], v['displayname'], v['id'] ]:
                        self.instance = v
                        break
        return self.instance


    def get_network_ids(self):
        """Resolve C(networks) names to a comma separated string of network ids.

        Returns None when no networks were requested; fails the module when
        any requested network cannot be found in the project/zone.
        """
        network_names = self.module.params.get('networks')
        if not network_names:
            return None

        args = {}
        args['zoneid'] = self.get_zone_id()
        args['projectid'] = self.get_project_id()
        networks = self.cs.listNetworks(**args)
        if not networks:
            self.module.fail_json(msg="No networks available")

        network_ids = []
        network_displaytexts = []
        for network_name in network_names:
            for n in networks['network']:
                if network_name in [ n['displaytext'], n['name'], n['id'] ]:
                    network_ids.append(n['id'])
                    network_displaytexts.append(n['name'])
                    break

        if len(network_ids) != len(network_names):
            self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts)

        return ','.join(network_ids)


    def present_instance(self):
        """Ensure the instance exists and matches the requested configuration."""
        instance = self.get_instance()
        if not instance:
            instance = self.deploy_instance()
        else:
            instance = self.update_instance(instance)

        # Tags are reconciled on every run, for new and existing instances.
        instance = self.ensure_tags(resource=instance, resource_type='UserVm')

        return instance


    def get_user_data(self):
        """Return the base64 encoded C(user_data), or None if unset."""
        user_data = self.module.params.get('user_data')
        if user_data:
            user_data = base64.b64encode(user_data)
        return user_data


    def get_display_name(self):
        """Return C(display_name), falling back to C(name)."""
        display_name = self.module.params.get('display_name')
        if not display_name:
            display_name = self.module.params.get('name')
        return display_name


    def deploy_instance(self):
        """Deploy a new instance and return the API result.

        Marks the result changed; honors check mode and C(poll_async).
        """
        self.result['changed'] = True

        args = {}
        args['templateid'] = self.get_template_or_iso_id()
        args['zoneid'] = self.get_zone_id()
        args['serviceofferingid'] = self.get_service_offering_id()
        args['projectid'] = self.get_project_id()
        args['diskofferingid'] = self.get_disk_offering_id()
        args['networkids'] = self.get_network_ids()
        args['hypervisor'] = self.get_hypervisor()
        args['userdata'] = self.get_user_data()
        args['keyboard'] = self.module.params.get('keyboard')
        args['ipaddress'] = self.module.params.get('ip_address')
        args['ip6address'] = self.module.params.get('ip6_address')
        args['name'] = self.module.params.get('name')
        args['group'] = self.module.params.get('group')
        args['keypair'] = self.module.params.get('ssh_key')
        args['size'] = self.module.params.get('disk_size')
        args['securitygroupnames'] = ','.join(self.module.params.get('security_groups'))
        args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups'))

        instance = None
        if not self.module.check_mode:
            instance = self.cs.deployVirtualMachine(**args)

            if 'errortext' in instance:
                self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])

            poll_async = self.module.params.get('poll_async')
            if poll_async:
                instance = self._poll_job(instance, 'virtualmachine')
        return instance


    def update_instance(self, instance):
        """Apply service offering, metadata and SSH key changes to an instance.

        Changes are only applied when the instance is stopped or C(force) is
        set; a running instance is stopped first and started again afterwards.
        """
        args_service_offering = {}
        args_service_offering['id'] = instance['id']
        args_service_offering['serviceofferingid'] = self.get_service_offering_id()

        args_instance_update = {}
        args_instance_update['id'] = instance['id']
        args_instance_update['group'] = self.module.params.get('group')
        args_instance_update['displayname'] = self.get_display_name()
        args_instance_update['userdata'] = self.get_user_data()
        args_instance_update['ostypeid'] = self.get_os_type_id()

        args_ssh_key = {}
        args_ssh_key['id'] = instance['id']
        args_ssh_key['keypair'] = self.module.params.get('ssh_key')
        args_ssh_key['projectid'] = self.get_project_id()

        if self._has_changed(args_service_offering, instance) or \
           self._has_changed(args_instance_update, instance) or \
           self._has_changed(args_ssh_key, instance):

            force = self.module.params.get('force')
            instance_state = instance['state'].lower()

            if instance_state == 'stopped' or force:
                self.result['changed'] = True
                if not self.module.check_mode:

                    # Ensure VM has stopped
                    instance = self.stop_instance()
                    instance = self._poll_job(instance, 'virtualmachine')
                    self.instance = instance

                    # Change service offering
                    if self._has_changed(args_service_offering, instance):
                        res = self.cs.changeServiceForVirtualMachine(**args_service_offering)
                        if 'errortext' in res:
                            self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                        instance = res['virtualmachine']
                        self.instance = instance

                    # Update VM
                    if self._has_changed(args_instance_update, instance):
                        res = self.cs.updateVirtualMachine(**args_instance_update)
                        if 'errortext' in res:
                            self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                        instance = res['virtualmachine']
                        self.instance = instance

                    # Reset SSH key
                    if self._has_changed(args_ssh_key, instance):
                        instance = self.cs.resetSSHKeyForVirtualMachine(**args_ssh_key)
                        if 'errortext' in instance:
                            self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])

                        instance = self._poll_job(instance, 'virtualmachine')
                        self.instance = instance

                    # Start VM again if it was running before
                    if instance_state == 'running':
                        instance = self.start_instance()
        return instance


    def absent_instance(self):
        """Destroy the instance if it exists and is not already being removed."""
        instance = self.get_instance()
        if instance:
            if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']:
                self.result['changed'] = True
                if not self.module.check_mode:
                    res = self.cs.destroyVirtualMachine(id=instance['id'])

                    if 'errortext' in res:
                        self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

                    poll_async = self.module.params.get('poll_async')
                    if poll_async:
                        instance = self._poll_job(res, 'virtualmachine')
        return instance


    def expunge_instance(self):
        """Expunge the instance, destroying it first if still active."""
        instance = self.get_instance()
        if instance:
            res = {}
            if instance['state'].lower() in [ 'destroying', 'destroyed' ]:
                self.result['changed'] = True
                if not self.module.check_mode:
                    res = self.cs.expungeVirtualMachine(id=instance['id'])

            elif instance['state'].lower() not in [ 'expunging' ]:
                self.result['changed'] = True
                if not self.module.check_mode:
                    # destroy with expunge=True removes the VM in one call
                    res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True)

            if res and 'errortext' in res:
                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

            poll_async = self.module.params.get('poll_async')
            if poll_async:
                instance = self._poll_job(res, 'virtualmachine')
        return instance


    def stop_instance(self):
        """Stop the instance; no-op if it is already stopped or stopping."""
        instance = self.get_instance()
        if not instance:
            self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name'))

        if instance['state'].lower() in ['stopping', 'stopped']:
            return instance

        if instance['state'].lower() in ['starting', 'running']:
            self.result['changed'] = True
            if not self.module.check_mode:
                instance = self.cs.stopVirtualMachine(id=instance['id'])

                if 'errortext' in instance:
                    self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])

                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    instance = self._poll_job(instance, 'virtualmachine')
        return instance


    def start_instance(self):
        """Start the instance; no-op if it is already running or starting."""
        instance = self.get_instance()
        if not instance:
            # Bug fix: was 'module.params' (NameError) instead of 'self.module.params'.
            self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name'))

        if instance['state'].lower() in ['starting', 'running']:
            return instance

        if instance['state'].lower() in ['stopped', 'stopping']:
            self.result['changed'] = True
            if not self.module.check_mode:
                instance = self.cs.startVirtualMachine(id=instance['id'])

                if 'errortext' in instance:
                    self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])

                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    instance = self._poll_job(instance, 'virtualmachine')
        return instance


    def restart_instance(self):
        """Reboot a running instance, or start it if it is stopped."""
        instance = self.get_instance()
        if not instance:
            # Bug fix: was 'module.fail_json' (NameError) instead of 'self.module.fail_json'.
            self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name'))

        if instance['state'].lower() in [ 'running', 'starting' ]:
            self.result['changed'] = True
            if not self.module.check_mode:
                instance = self.cs.rebootVirtualMachine(id=instance['id'])

                if 'errortext' in instance:
                    self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])

                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    instance = self._poll_job(instance, 'virtualmachine')

        elif instance['state'].lower() in [ 'stopping', 'stopped' ]:
            instance = self.start_instance()
        return instance


    def get_result(self, instance):
        """Map API fields of C(instance) into the module result dict."""
        if instance:
            if 'id' in instance:
                self.result['id'] = instance['id']
            if 'name' in instance:
                self.result['name'] = instance['name']
            if 'displayname' in instance:
                self.result['display_name'] = instance['displayname']
            if 'group' in instance:
                self.result['group'] = instance['group']
            if 'project' in instance:
                self.result['project'] = instance['project']
            if 'publicip' in instance:
                # Bug fix: API key is 'publicip'; reading 'public_ip' raised KeyError.
                self.result['public_ip'] = instance['publicip']
            if 'passwordenabled' in instance:
                self.result['password_enabled'] = instance['passwordenabled']
            if 'password' in instance:
                self.result['password'] = instance['password']
            if 'serviceofferingname' in instance:
                self.result['service_offering'] = instance['serviceofferingname']
            if 'zonename' in instance:
                self.result['zone'] = instance['zonename']
            if 'templatename' in instance:
                self.result['template'] = instance['templatename']
            if 'isoname' in instance:
                self.result['iso'] = instance['isoname']
            if 'keypair' in instance:
                self.result['ssh_key'] = instance['keypair']
            if 'created' in instance:
                self.result['created'] = instance['created']
            if 'state' in instance:
                self.result['state'] = instance['state']
            if 'tags' in instance:
                self.result['tags'] = []
                for tag in instance['tags']:
                    result_tag = {}
                    result_tag['key'] = tag['key']
                    result_tag['value'] = tag['value']
                    self.result['tags'].append(result_tag)
            if 'securitygroup' in instance:
                security_groups = []
                for securitygroup in instance['securitygroup']:
                    security_groups.append(securitygroup['name'])
                self.result['security_groups'] = security_groups
            if 'affinitygroup' in instance:
                affinity_groups = []
                for affinitygroup in instance['affinitygroup']:
                    affinity_groups.append(affinitygroup['name'])
                self.result['affinity_groups'] = affinity_groups
            if 'nic' in instance:
                for nic in instance['nic']:
                    if nic['isdefault']:
                        self.result['default_ip'] = nic['ipaddress']
        return self.result
|
||||||
|
|
||||||
|
def main():
|
||||||
|
module = AnsibleModule(
|
||||||
|
argument_spec = dict(
|
||||||
|
name = dict(required=True),
|
||||||
|
display_name = dict(default=None),
|
||||||
|
group = dict(default=None),
|
||||||
|
state = dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'absent', 'destroyed', 'expunged'], default='present'),
|
||||||
|
service_offering = dict(default=None),
|
||||||
|
template = dict(default=None),
|
||||||
|
iso = dict(default=None),
|
||||||
|
networks = dict(type='list', aliases=[ 'network' ], default=None),
|
||||||
|
ip_address = dict(defaul=None),
|
||||||
|
ip6_address = dict(defaul=None),
|
||||||
|
disk_offering = dict(default=None),
|
||||||
|
disk_size = dict(type='int', default=None),
|
||||||
|
keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None),
|
||||||
|
hypervisor = dict(default=None),
|
||||||
|
security_groups = dict(type='list', aliases=[ 'security_group' ], default=[]),
|
||||||
|
affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]),
|
||||||
|
project = dict(default=None),
|
||||||
|
user_data = dict(default=None),
|
||||||
|
zone = dict(default=None),
|
||||||
|
ssh_key = dict(default=None),
|
||||||
|
force = dict(choices=BOOLEANS, default=False),
|
||||||
|
tags = dict(type='list', aliases=[ 'tag' ], default=None),
|
||||||
|
poll_async = dict(choices=BOOLEANS, default=True),
|
||||||
|
api_key = dict(default=None),
|
||||||
|
api_secret = dict(default=None),
|
||||||
|
api_url = dict(default=None),
|
||||||
|
api_http_method = dict(default='get'),
|
||||||
|
),
|
||||||
|
supports_check_mode=True
|
||||||
|
)
|
||||||
|
|
||||||
|
if not has_lib_cs:
|
||||||
|
module.fail_json(msg="python library cs required: pip install cs")
|
||||||
|
|
||||||
|
try:
|
||||||
|
acs_instance = AnsibleCloudStackInstance(module)
|
||||||
|
|
||||||
|
state = module.params.get('state')
|
||||||
|
|
||||||
|
if state in ['absent', 'destroyed']:
|
||||||
|
instance = acs_instance.absent_instance()
|
||||||
|
|
||||||
|
elif state in ['expunged']:
|
||||||
|
instance = acs_instance.expunge_instance()
|
||||||
|
|
||||||
|
elif state in ['present', 'deployed']:
|
||||||
|
instance = acs_instance.present_instance()
|
||||||
|
|
||||||
|
elif state in ['stopped']:
|
||||||
|
instance = acs_instance.stop_instance()
|
||||||
|
|
||||||
|
elif state in ['started']:
|
||||||
|
instance = acs_instance.start_instance()
|
||||||
|
|
||||||
|
elif state in ['restarted']:
|
||||||
|
instance = acs_instance.restart_instance()
|
||||||
|
|
||||||
|
if instance and 'state' in instance and instance['state'].lower() == 'error':
|
||||||
|
module.fail_json(msg="Instance named '%s' in error state." % module.params.get('name'))
|
||||||
|
|
||||||
|
result = acs_instance.get_result(instance)
|
||||||
|
|
||||||
|
except CloudStackException, e:
|
||||||
|
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||||
|
|
||||||
|
module.exit_json(**result)
|
||||||
|
|
||||||
|
# import module snippets
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
main()
|
@ -0,0 +1,324 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# (c) 2015, René Moser <mail@renemoser.net>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: cs_iso
|
||||||
|
short_description: Manages ISOs images on Apache CloudStack based clouds.
|
||||||
|
description:
|
||||||
|
- Register and remove ISO images.
|
||||||
|
version_added: '2.0'
|
||||||
|
author: René Moser
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Name of the ISO.
|
||||||
|
required: true
|
||||||
|
url:
|
||||||
|
description:
|
||||||
|
- URL where the ISO can be downloaded from. Required if C(state) is present.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
os_type:
|
||||||
|
description:
|
||||||
|
- Name of the OS that best represents the OS of this ISO. If the iso is bootable this parameter needs to be passed. Required if C(state) is present.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
is_ready:
|
||||||
|
description:
|
||||||
|
- This flag is used for searching existing ISOs. If set to C(true), it will only list ISO ready for deployment e.g. successfully downloaded and installed. Recommended to set it to C(false).
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
aliases: []
|
||||||
|
is_public:
|
||||||
|
description:
|
||||||
|
- Register the ISO to be publicly available to all users. Only used if C(state) is present.
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
is_featured:
|
||||||
|
description:
|
||||||
|
- Register the ISO to be featured. Only used if C(state) is present.
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
is_dynamically_scalable:
|
||||||
|
description:
|
||||||
|
- Register the ISO having XS/VMWare tools installed in order to support dynamic scaling of VM cpu/memory. Only used if C(state) is present.
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
aliases: []
|
||||||
|
checksum:
|
||||||
|
description:
|
||||||
|
- The MD5 checksum value of this ISO. If set, we search by checksum instead of name.
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
bootable:
|
||||||
|
description:
|
||||||
|
- Register the ISO to be bootable. Only used if C(state) is present.
|
||||||
|
required: false
|
||||||
|
default: true
|
||||||
|
project:
|
||||||
|
description:
|
||||||
|
- Name of the project the ISO to be registered in.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
zone:
|
||||||
|
description:
|
||||||
|
- Name of the zone you wish the ISO to be registered or deleted from. If not specified, first zone found will be used.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
iso_filter:
|
||||||
|
description:
|
||||||
|
- Name of the filter used to search for the ISO.
|
||||||
|
required: false
|
||||||
|
default: 'self'
|
||||||
|
choices: [ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- State of the ISO.
|
||||||
|
required: false
|
||||||
|
default: 'present'
|
||||||
|
choices: [ 'present', 'absent' ]
|
||||||
|
extends_documentation_fragment: cloudstack
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
---
|
||||||
|
# Register an ISO if ISO name does not already exist.
|
||||||
|
- local_action:
|
||||||
|
module: cs_iso
|
||||||
|
name: Debian 7 64-bit
|
||||||
|
url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
|
||||||
|
os_type: Debian GNU/Linux 7(64-bit)
|
||||||
|
|
||||||
|
|
||||||
|
# Register an ISO with given name if ISO md5 checksum does not already exist.
|
||||||
|
- local_action:
|
||||||
|
module: cs_iso
|
||||||
|
name: Debian 7 64-bit
|
||||||
|
url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
|
||||||
|
os_type:
|
||||||
|
checksum: 0b31bccccb048d20b551f70830bb7ad0
|
||||||
|
|
||||||
|
|
||||||
|
# Remove an ISO by name
|
||||||
|
- local_action:
|
||||||
|
module: cs_iso
|
||||||
|
name: Debian 7 64-bit
|
||||||
|
state: absent
|
||||||
|
|
||||||
|
|
||||||
|
# Remove an ISO by checksum
|
||||||
|
- local_action:
|
||||||
|
module: cs_iso
|
||||||
|
name: Debian 7 64-bit
|
||||||
|
checksum: 0b31bccccb048d20b551f70830bb7ad0
|
||||||
|
state: absent
|
||||||
|
'''
|
||||||
|
|
||||||
|
RETURN = '''
|
||||||
|
---
|
||||||
|
name:
|
||||||
|
description: Name of the ISO.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: Debian 7 64-bit
|
||||||
|
displaytext:
|
||||||
|
description: Text to be displayed of the ISO.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: Debian 7.7 64-bit minimal 2015-03-19
|
||||||
|
zone:
|
||||||
|
description: Name of zone the ISO is registered in.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: zuerich
|
||||||
|
status:
|
||||||
|
description: Status of the ISO.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: Successfully Installed
|
||||||
|
is_ready:
|
||||||
|
description: True if the ISO is ready to be deployed from.
|
||||||
|
returned: success
|
||||||
|
type: boolean
|
||||||
|
sample: true
|
||||||
|
checksum:
|
||||||
|
description: MD5 checksum of the ISO.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: 0b31bccccb048d20b551f70830bb7ad0
|
||||||
|
created:
|
||||||
|
description: Date of registering.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: 2015-03-29T14:57:06+0200
|
||||||
|
'''
|
||||||
|
|
||||||
|
try:
|
||||||
|
from cs import CloudStack, CloudStackException, read_config
|
||||||
|
has_lib_cs = True
|
||||||
|
except ImportError:
|
||||||
|
has_lib_cs = False
|
||||||
|
|
||||||
|
# import cloudstack common
|
||||||
|
from ansible.module_utils.cloudstack import *
|
||||||
|
|
||||||
|
|
||||||
|
class AnsibleCloudStackIso(AnsibleCloudStack):
    """Register, look up and remove ISO images on a CloudStack cloud."""

    def __init__(self, module):
        AnsibleCloudStack.__init__(self, module)
        self.result = {
            'changed': False,
        }
        # Cached ISO dict so repeated get_iso() calls hit the API only once.
        self.iso = None


    def register_iso(self):
        """Register the ISO unless a matching one already exists.

        Honors check mode. Fails the module when required parameters are
        missing or the API reports an error. Returns the ISO dict (or None
        in check mode when the ISO did not exist yet).
        """
        iso = self.get_iso()
        if not iso:
            args = {}
            args['zoneid'] = self.get_zone_id()
            args['projectid'] = self.get_project_id()

            args['bootable'] = self.module.params.get('bootable')
            args['ostypeid'] = self.get_os_type_id()
            # A bootable ISO needs an OS type for CloudStack to boot it.
            if args['bootable'] and not args['ostypeid']:
                self.module.fail_json(msg="OS type 'os_type' is required if 'bootable=true'.")

            args['url'] = self.module.params.get('url')
            if not args['url']:
                self.module.fail_json(msg="URL is required.")

            args['name'] = self.module.params.get('name')
            args['displaytext'] = self.module.params.get('name')
            args['checksum'] = self.module.params.get('checksum')
            args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable')
            args['isfeatured'] = self.module.params.get('is_featured')
            args['ispublic'] = self.module.params.get('is_public')

            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.cs.registerIso(**args)
                # Surface API errors explicitly instead of crashing with a
                # KeyError on the missing 'iso' key (matches the error
                # handling style of the sibling security group module).
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                iso = res['iso'][0]
        return iso


    def get_iso(self):
        """Return the matching ISO dict (cached), or None if not found.

        Matches by MD5 checksum when the 'checksum' param is set, otherwise
        by name via the API-side name filter.
        """
        if not self.iso:
            args = {}
            args['isready'] = self.module.params.get('is_ready')
            args['isofilter'] = self.module.params.get('iso_filter')
            args['projectid'] = self.get_project_id()
            args['zoneid'] = self.get_zone_id()

            # If checksum is set, we only look on that.
            checksum = self.module.params.get('checksum')
            if not checksum:
                args['name'] = self.module.params.get('name')

            isos = self.cs.listIsos(**args)
            if isos:
                if not checksum:
                    self.iso = isos['iso'][0]
                else:
                    for i in isos['iso']:
                        if i['checksum'] == checksum:
                            self.iso = i
                            break
        return self.iso


    def remove_iso(self):
        """Delete the ISO if it exists. Honors check mode.

        Returns the (former) ISO dict, or None if nothing matched.
        """
        iso = self.get_iso()
        if iso:
            self.result['changed'] = True
            args = {}
            args['id'] = iso['id']
            args['projectid'] = self.get_project_id()
            args['zoneid'] = self.get_zone_id()
            if not self.module.check_mode:
                res = self.cs.deleteIso(**args)
                # Report API failures instead of silently ignoring them.
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
        return iso


    def get_result(self, iso):
        """Copy the reportable ISO fields into self.result and return it."""
        if iso:
            # API key -> result key (only set keys present in the response).
            field_map = {
                'displaytext': 'displaytext',
                'name': 'name',
                'zonename': 'zone',
                'checksum': 'checksum',
                'status': 'status',
                'isready': 'is_ready',
                'created': 'created',
            }
            for api_key, result_key in field_map.items():
                if api_key in iso:
                    self.result[result_key] = iso[api_key]
        return self.result
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Entry point of the cs_iso module: parse arguments and dispatch on state."""
    module = AnsibleModule(
        argument_spec = dict(
            # a required argument must not carry a default
            name = dict(required=True),
            url = dict(default=None),
            os_type = dict(default=None),
            zone = dict(default=None),
            iso_filter = dict(default='self', choices=[ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]),
            project = dict(default=None),
            checksum = dict(default=None),
            is_ready = dict(choices=BOOLEANS, default=False),
            bootable = dict(choices=BOOLEANS, default=True),
            is_featured = dict(choices=BOOLEANS, default=False),
            # documented and read by register_iso() but previously missing
            # from the argument_spec, so it always came back as None
            is_public = dict(choices=BOOLEANS, default=False),
            is_dynamically_scalable = dict(choices=BOOLEANS, default=False),
            state = dict(choices=['present', 'absent'], default='present'),
            api_key = dict(default=None),
            api_secret = dict(default=None),
            api_url = dict(default=None),
            api_http_method = dict(default='get'),
        ),
        supports_check_mode=True
    )

    if not has_lib_cs:
        module.fail_json(msg="python library cs required: pip install cs")

    try:
        acs_iso = AnsibleCloudStackIso(module)

        state = module.params.get('state')
        if state in ['absent']:
            iso = acs_iso.remove_iso()
        else:
            iso = acs_iso.register_iso()

        result = acs_iso.get_result(iso)

    # 'except X as e' works on Python 2.6+ and Python 3,
    # unlike the old 'except X, e' form
    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)

# import module snippets
from ansible.module_utils.basic import *
main()
|
@ -0,0 +1,198 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# (c) 2015, René Moser <mail@renemoser.net>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: cs_securitygroup
|
||||||
|
short_description: Manages security groups on Apache CloudStack based clouds.
|
||||||
|
description:
|
||||||
|
- Create and remove security groups.
|
||||||
|
version_added: '2.0'
|
||||||
|
author: René Moser
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Name of the security group.
|
||||||
|
required: true
|
||||||
|
description:
|
||||||
|
description:
|
||||||
|
- Description of the security group.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- State of the security group.
|
||||||
|
required: false
|
||||||
|
default: 'present'
|
||||||
|
choices: [ 'present', 'absent' ]
|
||||||
|
project:
|
||||||
|
description:
|
||||||
|
- Name of the project the security group to be created in.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
extends_documentation_fragment: cloudstack
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
---
|
||||||
|
# Create a security group
|
||||||
|
- local_action:
|
||||||
|
module: cs_securitygroup
|
||||||
|
name: default
|
||||||
|
description: default security group
|
||||||
|
|
||||||
|
|
||||||
|
# Remove a security group
|
||||||
|
- local_action:
|
||||||
|
module: cs_securitygroup
|
||||||
|
name: default
|
||||||
|
state: absent
|
||||||
|
'''
|
||||||
|
|
||||||
|
RETURN = '''
|
||||||
|
---
|
||||||
|
name:
|
||||||
|
description: Name of security group.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: app
|
||||||
|
description:
|
||||||
|
description: Description of security group.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: application security group
|
||||||
|
'''
|
||||||
|
|
||||||
|
try:
|
||||||
|
from cs import CloudStack, CloudStackException, read_config
|
||||||
|
has_lib_cs = True
|
||||||
|
except ImportError:
|
||||||
|
has_lib_cs = False
|
||||||
|
|
||||||
|
# import cloudstack common
|
||||||
|
from ansible.module_utils.cloudstack import *
|
||||||
|
|
||||||
|
|
||||||
|
class AnsibleCloudStackSecurityGroup(AnsibleCloudStack):
    """Create, look up and delete CloudStack security groups."""

    def __init__(self, module):
        AnsibleCloudStack.__init__(self, module)
        self.result = {
            'changed': False,
        }
        # Lazily-populated cache of the matching security group dict.
        self.security_group = None


    def get_security_group(self):
        """Return the security group named in the params, or None.

        The lookup result is cached on the instance, so only the first
        call performs an API request.
        """
        if not self.security_group:
            wanted_name = self.module.params.get('name')
            listing = self.cs.listSecurityGroups(projectid=self.get_project_id())
            if listing:
                self.security_group = next(
                    (group for group in listing['securitygroup']
                     if group['name'] == wanted_name),
                    None)
        return self.security_group


    def create_security_group(self):
        """Create the security group if it does not exist. Honors check mode.

        Returns the security group dict (or None in check mode when the
        group did not exist yet).
        """
        group = self.get_security_group()
        if not group:
            self.result['changed'] = True

            if not self.module.check_mode:
                response = self.cs.createSecurityGroup(
                    name=self.module.params.get('name'),
                    projectid=self.get_project_id(),
                    description=self.module.params.get('description'))
                if 'errortext' in response:
                    self.module.fail_json(msg="Failed: '%s'" % response['errortext'])
                group = response['securitygroup']

        return group


    def remove_security_group(self):
        """Delete the security group if it exists. Honors check mode.

        Returns the (former) security group dict, or None.
        """
        group = self.get_security_group()
        if group:
            self.result['changed'] = True

            if not self.module.check_mode:
                response = self.cs.deleteSecurityGroup(
                    name=self.module.params.get('name'),
                    projectid=self.get_project_id())
                if 'errortext' in response:
                    self.module.fail_json(msg="Failed: '%s'" % response['errortext'])

        return group


    def get_result(self, security_group):
        """Copy the reportable security group fields into self.result."""
        if security_group:
            for field in ('name', 'description'):
                if field in security_group:
                    self.result[field] = security_group[field]
        return self.result
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Entry point of the cs_securitygroup module: parse args, dispatch on state."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            description = dict(default=None),
            state = dict(choices=['present', 'absent'], default='present'),
            project = dict(default=None),
            api_key = dict(default=None),
            api_secret = dict(default=None),
            api_url = dict(default=None),
            api_http_method = dict(default='get'),
        ),
        supports_check_mode=True
    )

    if not has_lib_cs:
        module.fail_json(msg="python library cs required: pip install cs")

    try:
        acs_sg = AnsibleCloudStackSecurityGroup(module)

        state = module.params.get('state')
        if state in ['absent']:
            sg = acs_sg.remove_security_group()
        else:
            sg = acs_sg.create_security_group()

        result = acs_sg.get_result(sg)

    # 'except X as e' works on Python 2.6+ and Python 3,
    # unlike the old 'except X, e' form
    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)

# import module snippets
from ansible.module_utils.basic import *
main()
|
@ -0,0 +1,439 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# (c) 2015, René Moser <mail@renemoser.net>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: cs_securitygroup_rule
|
||||||
|
short_description: Manages security group rules on Apache CloudStack based clouds.
|
||||||
|
description:
|
||||||
|
- Add and remove security group rules.
|
||||||
|
version_added: '2.0'
|
||||||
|
author: René Moser
|
||||||
|
options:
|
||||||
|
security_group:
|
||||||
|
description:
|
||||||
|
- Name of the security group the rule is related to. The security group must be existing.
|
||||||
|
required: true
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- State of the security group rule.
|
||||||
|
required: false
|
||||||
|
default: 'present'
|
||||||
|
choices: [ 'present', 'absent' ]
|
||||||
|
protocol:
|
||||||
|
description:
|
||||||
|
- Protocol of the security group rule.
|
||||||
|
required: false
|
||||||
|
default: 'tcp'
|
||||||
|
choices: [ 'tcp', 'udp', 'icmp', 'ah', 'esp', 'gre' ]
|
||||||
|
type:
|
||||||
|
description:
|
||||||
|
- Ingress or egress security group rule.
|
||||||
|
required: false
|
||||||
|
default: 'ingress'
|
||||||
|
choices: [ 'ingress', 'egress' ]
|
||||||
|
cidr:
|
||||||
|
description:
|
||||||
|
- CIDR (full notation) to be used for security group rule.
|
||||||
|
required: false
|
||||||
|
default: '0.0.0.0/0'
|
||||||
|
user_security_group:
|
||||||
|
description:
|
||||||
|
- Security group this rule is based on.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
start_port:
|
||||||
|
description:
|
||||||
|
- Start port for this rule. Required if C(protocol=tcp) or C(protocol=udp).
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
aliases: [ 'port' ]
|
||||||
|
end_port:
|
||||||
|
description:
|
||||||
|
- End port for this rule. Required if C(protocol=tcp) or C(protocol=udp), but C(start_port) will be used if not set.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
icmp_type:
|
||||||
|
description:
|
||||||
|
- Type of the icmp message being sent. Required if C(protocol=icmp).
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
icmp_code:
|
||||||
|
description:
|
||||||
|
- Error code for this icmp message. Required if C(protocol=icmp).
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
project:
|
||||||
|
description:
|
||||||
|
- Name of the project the security group to be created in.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
poll_async:
|
||||||
|
description:
|
||||||
|
- Poll async jobs until job has finished.
|
||||||
|
required: false
|
||||||
|
default: true
|
||||||
|
extends_documentation_fragment: cloudstack
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
---
|
||||||
|
# Allow inbound port 80/tcp from 1.2.3.4 added to security group 'default'
|
||||||
|
- local_action:
|
||||||
|
module: cs_securitygroup_rule
|
||||||
|
security_group: default
|
||||||
|
port: 80
|
||||||
|
cidr: 1.2.3.4/32
|
||||||
|
|
||||||
|
|
||||||
|
# Allow tcp/udp outbound added to security group 'default'
|
||||||
|
- local_action:
|
||||||
|
module: cs_securitygroup_rule
|
||||||
|
security_group: default
|
||||||
|
type: egress
|
||||||
|
start_port: 1
|
||||||
|
end_port: 65535
|
||||||
|
protocol: '{{ item }}'
|
||||||
|
with_items:
|
||||||
|
- tcp
|
||||||
|
- udp
|
||||||
|
|
||||||
|
|
||||||
|
# Allow inbound icmp from 0.0.0.0/0 added to security group 'default'
|
||||||
|
- local_action:
|
||||||
|
module: cs_securitygroup_rule
|
||||||
|
security_group: default
|
||||||
|
protocol: icmp
|
||||||
|
icmp_code: -1
|
||||||
|
icmp_type: -1
|
||||||
|
|
||||||
|
|
||||||
|
# Remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default'
|
||||||
|
- local_action:
|
||||||
|
module: cs_securitygroup_rule
|
||||||
|
security_group: default
|
||||||
|
port: 80
|
||||||
|
state: absent
|
||||||
|
|
||||||
|
|
||||||
|
# Allow inbound port 80/tcp from security group web added to security group 'default'
|
||||||
|
- local_action:
|
||||||
|
module: cs_securitygroup_rule
|
||||||
|
security_group: default
|
||||||
|
port: 80
|
||||||
|
user_security_group: web
|
||||||
|
'''
|
||||||
|
|
||||||
|
RETURN = '''
|
||||||
|
---
|
||||||
|
security_group:
|
||||||
|
description: security group of the rule.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: default
|
||||||
|
type:
|
||||||
|
description: type of the rule.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: ingress
|
||||||
|
cidr:
|
||||||
|
description: CIDR of the rule.
|
||||||
|
returned: success and cidr is defined
|
||||||
|
type: string
|
||||||
|
sample: 0.0.0.0/0
|
||||||
|
user_security_group:
|
||||||
|
description: user security group of the rule.
|
||||||
|
returned: success and user_security_group is defined
|
||||||
|
type: string
|
||||||
|
sample: default
|
||||||
|
protocol:
|
||||||
|
description: protocol of the rule.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: tcp
|
||||||
|
start_port:
|
||||||
|
description: start port of the rule.
|
||||||
|
returned: success
|
||||||
|
type: int
|
||||||
|
sample: 80
|
||||||
|
end_port:
|
||||||
|
description: end port of the rule.
|
||||||
|
returned: success
|
||||||
|
type: int
|
||||||
|
sample: 80
|
||||||
|
'''
|
||||||
|
|
||||||
|
try:
|
||||||
|
from cs import CloudStack, CloudStackException, read_config
|
||||||
|
has_lib_cs = True
|
||||||
|
except ImportError:
|
||||||
|
has_lib_cs = False
|
||||||
|
|
||||||
|
# import cloudstack common
|
||||||
|
from ansible.module_utils.cloudstack import *
|
||||||
|
|
||||||
|
|
||||||
|
class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
|
||||||
|
|
||||||
|
def __init__(self, module):
|
||||||
|
AnsibleCloudStack.__init__(self, module)
|
||||||
|
self.result = {
|
||||||
|
'changed': False,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _tcp_udp_match(self, rule, protocol, start_port, end_port):
|
||||||
|
return protocol in ['tcp', 'udp'] \
|
||||||
|
and protocol == rule['protocol'] \
|
||||||
|
and start_port == int(rule['startport']) \
|
||||||
|
and end_port == int(rule['endport'])
|
||||||
|
|
||||||
|
|
||||||
|
def _icmp_match(self, rule, protocol, icmp_code, icmp_type):
|
||||||
|
return protocol == 'icmp' \
|
||||||
|
and protocol == rule['protocol'] \
|
||||||
|
and icmp_code == int(rule['icmpcode']) \
|
||||||
|
and icmp_type == int(rule['icmptype'])
|
||||||
|
|
||||||
|
|
||||||
|
def _ah_esp_gre_match(self, rule, protocol):
|
||||||
|
return protocol in ['ah', 'esp', 'gre'] \
|
||||||
|
and protocol == rule['protocol']
|
||||||
|
|
||||||
|
|
||||||
|
def _type_security_group_match(self, rule, security_group_name):
|
||||||
|
return security_group_name \
|
||||||
|
and 'securitygroupname' in rule \
|
||||||
|
and security_group_name == rule['securitygroupname']
|
||||||
|
|
||||||
|
|
||||||
|
def _type_cidr_match(self, rule, cidr):
|
||||||
|
return 'cidr' in rule \
|
||||||
|
and cidr == rule['cidr']
|
||||||
|
|
||||||
|
|
||||||
|
def _get_rule(self, rules):
|
||||||
|
user_security_group_name = self.module.params.get('user_security_group')
|
||||||
|
cidr = self.module.params.get('cidr')
|
||||||
|
protocol = self.module.params.get('protocol')
|
||||||
|
start_port = self.module.params.get('start_port')
|
||||||
|
end_port = self.module.params.get('end_port')
|
||||||
|
icmp_code = self.module.params.get('icmp_code')
|
||||||
|
icmp_type = self.module.params.get('icmp_type')
|
||||||
|
|
||||||
|
if not end_port:
|
||||||
|
end_port = start_port
|
||||||
|
|
||||||
|
if protocol in ['tcp', 'udp'] and not (start_port and end_port):
|
||||||
|
self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol)
|
||||||
|
|
||||||
|
if protocol == 'icmp' and not (icmp_type and icmp_code):
|
||||||
|
self.module.fail_json(msg="no icmp_type or icmp_code set for protocol '%s'" % protocol)
|
||||||
|
|
||||||
|
for rule in rules:
|
||||||
|
if user_security_group_name:
|
||||||
|
type_match = self._type_security_group_match(rule, user_security_group_name)
|
||||||
|
else:
|
||||||
|
type_match = self._type_cidr_match(rule, cidr)
|
||||||
|
|
||||||
|
protocol_match = ( self._tcp_udp_match(rule, protocol, start_port, end_port) \
|
||||||
|
or self._icmp_match(rule, protocol, icmp_code, icmp_type) \
|
||||||
|
or self._ah_esp_gre_match(rule, protocol)
|
||||||
|
)
|
||||||
|
|
||||||
|
if type_match and protocol_match:
|
||||||
|
return rule
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def get_security_group(self, security_group_name=None):
|
||||||
|
if not security_group_name:
|
||||||
|
security_group_name = self.module.params.get('security_group')
|
||||||
|
args = {}
|
||||||
|
args['securitygroupname'] = security_group_name
|
||||||
|
args['projectid'] = self.get_project_id()
|
||||||
|
sgs = self.cs.listSecurityGroups(**args)
|
||||||
|
if not sgs or 'securitygroup' not in sgs:
|
||||||
|
self.module.fail_json(msg="security group '%s' not found" % security_group_name)
|
||||||
|
return sgs['securitygroup'][0]
|
||||||
|
|
||||||
|
|
||||||
|
def add_rule(self):
|
||||||
|
security_group = self.get_security_group()
|
||||||
|
|
||||||
|
args = {}
|
||||||
|
user_security_group_name = self.module.params.get('user_security_group')
|
||||||
|
|
||||||
|
# the user_security_group and cidr are mutually_exclusive, but cidr is defaulted to 0.0.0.0/0.
|
||||||
|
# that is why we ignore if we have a user_security_group.
|
||||||
|
if user_security_group_name:
|
||||||
|
args['usersecuritygrouplist'] = []
|
||||||
|
user_security_group = self.get_security_group(user_security_group_name)
|
||||||
|
args['usersecuritygrouplist'].append({
|
||||||
|
'group': user_security_group['name'],
|
||||||
|
'account': user_security_group['account'],
|
||||||
|
})
|
||||||
|
else:
|
||||||
|
args['cidrlist'] = self.module.params.get('cidr')
|
||||||
|
|
||||||
|
args['protocol'] = self.module.params.get('protocol')
|
||||||
|
args['startport'] = self.module.params.get('start_port')
|
||||||
|
args['endport'] = self.module.params.get('end_port')
|
||||||
|
args['icmptype'] = self.module.params.get('icmp_type')
|
||||||
|
args['icmpcode'] = self.module.params.get('icmp_code')
|
||||||
|
args['projectid'] = self.get_project_id()
|
||||||
|
args['securitygroupid'] = security_group['id']
|
||||||
|
|
||||||
|
if not args['endport']:
|
||||||
|
args['endport'] = args['startport']
|
||||||
|
|
||||||
|
rule = None
|
||||||
|
res = None
|
||||||
|
type = self.module.params.get('type')
|
||||||
|
if type == 'ingress':
|
||||||
|
rule = self._get_rule(security_group['ingressrule'])
|
||||||
|
if not rule:
|
||||||
|
self.result['changed'] = True
|
||||||
|
if not self.module.check_mode:
|
||||||
|
res = self.cs.authorizeSecurityGroupIngress(**args)
|
||||||
|
|
||||||
|
elif type == 'egress':
|
||||||
|
rule = self._get_rule(security_group['egressrule'])
|
||||||
|
if not rule:
|
||||||
|
self.result['changed'] = True
|
||||||
|
if not self.module.check_mode:
|
||||||
|
res = self.cs.authorizeSecurityGroupEgress(**args)
|
||||||
|
|
||||||
|
if res and 'errortext' in res:
|
||||||
|
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
|
||||||
|
|
||||||
|
poll_async = self.module.params.get('poll_async')
|
||||||
|
if res and poll_async:
|
||||||
|
security_group = self._poll_job(res, 'securitygroup')
|
||||||
|
return security_group
|
||||||
|
|
||||||
|
|
||||||
|
def remove_rule(self):
|
||||||
|
security_group = self.get_security_group()
|
||||||
|
rule = None
|
||||||
|
res = None
|
||||||
|
type = self.module.params.get('type')
|
||||||
|
if type == 'ingress':
|
||||||
|
rule = self._get_rule(security_group['ingressrule'])
|
||||||
|
if rule:
|
||||||
|
self.result['changed'] = True
|
||||||
|
if not self.module.check_mode:
|
||||||
|
res = self.cs.revokeSecurityGroupIngress(id=rule['ruleid'])
|
||||||
|
|
||||||
|
elif type == 'egress':
|
||||||
|
rule = self._get_rule(security_group['egressrule'])
|
||||||
|
if rule:
|
||||||
|
self.result['changed'] = True
|
||||||
|
if not self.module.check_mode:
|
||||||
|
res = self.cs.revokeSecurityGroupEgress(id=rule['ruleid'])
|
||||||
|
|
||||||
|
if res and 'errortext' in res:
|
||||||
|
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
|
||||||
|
|
||||||
|
poll_async = self.module.params.get('poll_async')
|
||||||
|
if res and poll_async:
|
||||||
|
res = self._poll_job(res, 'securitygroup')
|
||||||
|
return security_group
|
||||||
|
|
||||||
|
|
||||||
|
def get_result(self, security_group_rule):
    """Populate and return ``self.result`` with facts about the rule.

    Copies the first matching rule's API fields into the module result
    under the module's own key names.

    :param security_group_rule: security group dict returned by the
        CloudStack API, containing 'ingressrule' / 'egressrule' lists.
    :return: the updated ``self.result`` dict.
    """
    rule_type = self.module.params.get('type')
    key = 'egressrule' if rule_type == 'egress' else 'ingressrule'

    self.result['type'] = rule_type
    self.result['security_group'] = self.module.params.get('security_group')

    if key in security_group_rule and security_group_rule[key]:
        rule = security_group_rule[key][0]
        # Map CloudStack API field names to the module's result keys;
        # replaces seven near-identical copy-paste if-blocks.
        field_map = {
            'securitygroupname': 'user_security_group',
            'cidr': 'cidr',
            'protocol': 'protocol',
            'startport': 'start_port',
            'endport': 'end_port',
            'icmpcode': 'icmp_code',
            'icmptype': 'icmp_type',
        }
        for api_key, result_key in field_map.items():
            if api_key in rule:
                self.result[result_key] = rule[api_key]
    return self.result
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Module entry point: add or remove a CloudStack security group rule."""
    module = AnsibleModule(
        argument_spec=dict(
            security_group=dict(required=True),
            type=dict(choices=['ingress', 'egress'], default='ingress'),
            cidr=dict(default='0.0.0.0/0'),
            user_security_group=dict(default=None),
            protocol=dict(choices=['tcp', 'udp', 'icmp', 'ah', 'esp', 'gre'], default='tcp'),
            icmp_type=dict(type='int', default=None),
            icmp_code=dict(type='int', default=None),
            start_port=dict(type='int', default=None, aliases=['port']),
            end_port=dict(type='int', default=None),
            state=dict(choices=['present', 'absent'], default='present'),
            project=dict(default=None),
            poll_async=dict(choices=BOOLEANS, default=True),
            api_key=dict(default=None),
            api_secret=dict(default=None),
            api_url=dict(default=None),
            api_http_method=dict(default='get'),
        ),
        # ICMP type/code and port ranges are different addressing schemes
        # for a rule and must not be combined.
        mutually_exclusive=(
            ['icmp_type', 'start_port'],
            ['icmp_type', 'end_port'],
            ['icmp_code', 'start_port'],
            ['icmp_code', 'end_port'],
        ),
        supports_check_mode=True
    )

    if not has_lib_cs:
        module.fail_json(msg="python library cs required: pip install cs")

    try:
        acs_sg_rule = AnsibleCloudStackSecurityGroupRule(module)

        state = module.params.get('state')
        if state == 'absent':
            sg_rule = acs_sg_rule.remove_rule()
        else:
            sg_rule = acs_sg_rule.add_rule()

        result = acs_sg_rule.get_result(sg_rule)

    # NOTE: was `except CloudStackException, e` — Python-2-only syntax;
    # `as e` is valid on Python 2.6+ and Python 3.
    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)


# import module snippets
from ansible.module_utils.basic import *
main()
|
@ -0,0 +1,238 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# (c) 2015, René Moser <mail@renemoser.net>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: cs_sshkeypair
|
||||||
|
short_description: Manages SSH keys on Apache CloudStack based clouds.
|
||||||
|
description:
|
||||||
|
- If no key was found and no public key was provided and a new SSH
|
||||||
|
private/public key pair will be created and the private key will be returned.
|
||||||
|
version_added: '2.0'
|
||||||
|
author: René Moser
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Name of public key.
|
||||||
|
required: true
|
||||||
|
project:
|
||||||
|
description:
|
||||||
|
- Name of the project the public key to be registered in.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- State of the public key.
|
||||||
|
required: false
|
||||||
|
default: 'present'
|
||||||
|
choices: [ 'present', 'absent' ]
|
||||||
|
public_key:
|
||||||
|
description:
|
||||||
|
- String of the public key.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
extends_documentation_fragment: cloudstack
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
---
|
||||||
|
# create a new private / public key pair:
|
||||||
|
- local_action: cs_sshkeypair name=linus@example.com
|
||||||
|
register: key
|
||||||
|
- debug: msg='private key is {{ key.private_key }}'
|
||||||
|
|
||||||
|
# remove a public key by its name:
|
||||||
|
- local_action: cs_sshkeypair name=linus@example.com state=absent
|
||||||
|
|
||||||
|
# register your existing local public key:
|
||||||
|
- local_action: cs_sshkeypair name=linus@example.com public_key='{{ lookup('file', '~/.ssh/id_rsa.pub') }}'
|
||||||
|
'''
|
||||||
|
|
||||||
|
RETURN = '''
|
||||||
|
---
|
||||||
|
name:
|
||||||
|
description: Name of the SSH public key.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: linus@example.com
|
||||||
|
fingerprint:
|
||||||
|
description: Fingerprint of the SSH public key.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: "86:5e:a3:e8:bd:95:7b:07:7c:c2:5c:f7:ad:8b:09:28"
|
||||||
|
private_key:
|
||||||
|
description: Private key of generated SSH keypair.
|
||||||
|
returned: changed
|
||||||
|
type: string
|
||||||
|
sample: "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQCkeFYjI+4k8bWfIRMzp4pCzhlopNydbbwRu824P5ilD4ATWMUG\nvEtuCQ2Mp5k5Bma30CdYHgh2/SbxC5RxXSUKTUJtTKpoJUy8PAhb1nn9dnfkC2oU\naRVi9NRUgypTIZxMpgooHOxvAzWxbZCyh1W+91Ld3FNaGxTLqTgeevY84wIDAQAB\nAoGAcwQwgLyUwsNB1vmjWwE0QEmvHS4FlhZyahhi4hGfZvbzAxSWHIK7YUT1c8KU\n9XsThEIN8aJ3GvcoL3OAqNKRnoNb14neejVHkYRadhxqc0GVN6AUIyCqoEMpvhFI\nQrinM572ORzv5ffRjCTbvZcYlW+sqFKNo5e8pYIB8TigpFECQQDu7bg9vkvg8xPs\nkP1K+EH0vsR6vUfy+m3euXjnbJtiP7RoTkZk0JQMOmexgy1qQhISWT0e451wd62v\nJ7M0trl5AkEAsDivJnMIlCCCypwPN4tdNUYpe9dtidR1zLmb3SA7wXk5xMUgLZI9\ncWPjBCMt0KKShdDhQ+hjXAyKQLF7iAPuOwJABjdHCMwvmy2XwhrPjCjDRoPEBtFv\n0sFzJE08+QBZVogDwIbwy+SlRWArnHGmN9J6N+H8dhZD3U4vxZPJ1MBAOQJBAJxO\nCv1dt1Q76gbwmYa49LnWO+F+2cgRTVODpr5iYt5fOmBQQRRqzFkRMkFvOqn+KVzM\nQ6LKM6dn8BEl295vLhUCQQCVDWzoSk3GjL3sOjfAUTyAj8VAXM69llaptxWWySPM\nE9pA+8rYmHfohYFx7FD5/KWCO+sfmxTNB48X0uwyE8tO\n-----END RSA PRIVATE KEY-----\n"
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from cs import CloudStack, CloudStackException, read_config
|
||||||
|
has_lib_cs = True
|
||||||
|
except ImportError:
|
||||||
|
has_lib_cs = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
import sshpubkeys
|
||||||
|
has_lib_sshpubkeys = True
|
||||||
|
except ImportError:
|
||||||
|
has_lib_sshpubkeys = False
|
||||||
|
|
||||||
|
from ansible.module_utils.cloudstack import *
|
||||||
|
|
||||||
|
class AnsibleCloudStackSshKey(AnsibleCloudStack):
    """Manage CloudStack SSH key pairs: register, create, remove, query."""

    def __init__(self, module):
        AnsibleCloudStack.__init__(self, module)
        self.result = {
            'changed': False,
        }
        # Cache of the looked-up key pair so the API is queried only once.
        self.ssh_key = None


    def register_ssh_key(self, public_key):
        """Register a user-supplied public key.

        If a key pair with the same name exists but its fingerprint
        differs, the old pair is deleted and the new key registered
        (CloudStack has no update call).
        """
        ssh_key = self.get_ssh_key()

        args = {}
        args['projectid'] = self.get_project_id()
        args['name'] = self.module.params.get('name')

        res = None
        if not ssh_key:
            self.result['changed'] = True
            args['publickey'] = public_key
            if not self.module.check_mode:
                res = self.cs.registerSSHKeyPair(**args)
        else:
            fingerprint = self._get_ssh_fingerprint(public_key)
            if ssh_key['fingerprint'] != fingerprint:
                self.result['changed'] = True
                if not self.module.check_mode:
                    # delete-then-register replaces the existing key pair
                    self.cs.deleteSSHKeyPair(**args)
                    args['publickey'] = public_key
                    res = self.cs.registerSSHKeyPair(**args)

        if res and 'keypair' in res:
            ssh_key = res['keypair']
        return ssh_key


    def create_ssh_key(self):
        """Create a new key pair server-side; returns the key pair dict
        (including the generated private key on creation)."""
        ssh_key = self.get_ssh_key()
        if not ssh_key:
            self.result['changed'] = True
            args = {}
            args['projectid'] = self.get_project_id()
            args['name'] = self.module.params.get('name')
            if not self.module.check_mode:
                res = self.cs.createSSHKeyPair(**args)
                # only dereference res when the API was actually called;
                # in check mode res does not exist.
                ssh_key = res['keypair']
        return ssh_key


    def remove_ssh_key(self):
        """Delete the named key pair if present; returns the old key dict."""
        ssh_key = self.get_ssh_key()
        if ssh_key:
            self.result['changed'] = True
            args = {}
            args['name'] = self.module.params.get('name')
            args['projectid'] = self.get_project_id()
            if not self.module.check_mode:
                res = self.cs.deleteSSHKeyPair(**args)
        return ssh_key


    def get_ssh_key(self):
        """Look up (and cache) the key pair by name within the project."""
        if not self.ssh_key:
            args = {}
            args['projectid'] = self.get_project_id()
            args['name'] = self.module.params.get('name')

            ssh_keys = self.cs.listSSHKeyPairs(**args)
            if ssh_keys and 'sshkeypair' in ssh_keys:
                self.ssh_key = ssh_keys['sshkeypair'][0]
        return self.ssh_key


    def get_result(self, ssh_key):
        """Copy the key pair's fields into the module result."""
        if ssh_key:
            if 'fingerprint' in ssh_key:
                self.result['fingerprint'] = ssh_key['fingerprint']

            if 'name' in ssh_key:
                self.result['name'] = ssh_key['name']

            # only present directly after server-side key generation
            if 'privatekey' in ssh_key:
                self.result['private_key'] = ssh_key['privatekey']
        return self.result


    def _get_ssh_fingerprint(self, public_key):
        """Return the fingerprint of *public_key* via sshpubkeys."""
        key = sshpubkeys.SSHKey(public_key)
        return key.hash()
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Module entry point: register, create or remove an SSH key pair."""
    module = AnsibleModule(
        argument_spec=dict(
            # NOTE: original had `required=True, default=None`; the default
            # is redundant (and contradictory) with required=True.
            name=dict(required=True),
            public_key=dict(default=None),
            project=dict(default=None),
            state=dict(choices=['present', 'absent'], default='present'),
            api_key=dict(default=None),
            api_secret=dict(default=None),
            api_url=dict(default=None),
            api_http_method=dict(default='get'),
        ),
        supports_check_mode=True
    )

    if not has_lib_cs:
        module.fail_json(msg="python library cs required: pip install cs")

    if not has_lib_sshpubkeys:
        module.fail_json(msg="python library sshpubkeys required: pip install sshpubkeys")

    try:
        acs_sshkey = AnsibleCloudStackSshKey(module)
        state = module.params.get('state')
        if state == 'absent':
            ssh_key = acs_sshkey.remove_ssh_key()
        else:
            public_key = module.params.get('public_key')
            if public_key:
                # user supplied a key: register (or replace) it
                ssh_key = acs_sshkey.register_ssh_key(public_key)
            else:
                # no key supplied: have CloudStack generate one
                ssh_key = acs_sshkey.create_ssh_key()

        result = acs_sshkey.get_result(ssh_key)

    # was `except CloudStackException, e` — Python-2-only syntax
    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)


# import module snippets
from ansible.module_utils.basic import *
main()
|
@ -0,0 +1,290 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# (c) 2015, René Moser <mail@renemoser.net>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: cs_vmsnapshot
|
||||||
|
short_description: Manages VM snapshots on Apache CloudStack based clouds.
|
||||||
|
description:
|
||||||
|
- Create, remove and revert VM from snapshots.
|
||||||
|
version_added: '2.0'
|
||||||
|
author: René Moser
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Unique Name of the snapshot. In CloudStack terms C(displayname).
|
||||||
|
required: true
|
||||||
|
aliases: ['displayname']
|
||||||
|
vm:
|
||||||
|
description:
|
||||||
|
- Name of the virtual machine.
|
||||||
|
required: true
|
||||||
|
description:
|
||||||
|
description:
|
||||||
|
- Description of the snapshot.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
snapshot_memory:
|
||||||
|
description:
|
||||||
|
- Snapshot memory if set to true.
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
zone:
|
||||||
|
description:
|
||||||
|
- Name of the zone in which the VM is in. If not set, default zone is used.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
project:
|
||||||
|
description:
|
||||||
|
- Name of the project the VM is assigned to.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- State of the snapshot.
|
||||||
|
required: false
|
||||||
|
default: 'present'
|
||||||
|
choices: [ 'present', 'absent', 'revert' ]
|
||||||
|
poll_async:
|
||||||
|
description:
|
||||||
|
- Poll async jobs until job has finished.
|
||||||
|
required: false
|
||||||
|
default: true
|
||||||
|
extends_documentation_fragment: cloudstack
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
---
|
||||||
|
# Create a VM snapshot of disk and memory before an upgrade
|
||||||
|
- local_action:
|
||||||
|
module: cs_vmsnapshot
|
||||||
|
name: Snapshot before upgrade
|
||||||
|
vm: web-01
|
||||||
|
snapshot_memory: yes
|
||||||
|
|
||||||
|
|
||||||
|
# Revert a VM to a snapshot after a failed upgrade
|
||||||
|
- local_action:
|
||||||
|
module: cs_vmsnapshot
|
||||||
|
name: Snapshot before upgrade
|
||||||
|
vm: web-01
|
||||||
|
state: revert
|
||||||
|
|
||||||
|
|
||||||
|
# Remove a VM snapshot after successful upgrade
|
||||||
|
- local_action:
|
||||||
|
module: cs_vmsnapshot
|
||||||
|
name: Snapshot before upgrade
|
||||||
|
vm: web-01
|
||||||
|
state: absent
|
||||||
|
'''
|
||||||
|
|
||||||
|
RETURN = '''
|
||||||
|
---
|
||||||
|
name:
|
||||||
|
description: Name of the snapshot.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: snapshot before update
|
||||||
|
displayname:
|
||||||
|
description: displayname of the snapshot.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: snapshot before update
|
||||||
|
created:
|
||||||
|
description: date of the snapshot.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: 2015-03-29T14:57:06+0200
|
||||||
|
current:
|
||||||
|
description: true if snapshot is current
|
||||||
|
returned: success
|
||||||
|
type: boolean
|
||||||
|
sample: True
|
||||||
|
state:
|
||||||
|
description: state of the vm snapshot
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: Allocated
|
||||||
|
type:
|
||||||
|
description: type of vm snapshot
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: DiskAndMemory
|
||||||
|
description:
|
||||||
|
description:
|
||||||
|
description: description of vm snapshot
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: snapshot brought to you by Ansible
|
||||||
|
'''
|
||||||
|
|
||||||
|
try:
|
||||||
|
from cs import CloudStack, CloudStackException, read_config
|
||||||
|
has_lib_cs = True
|
||||||
|
except ImportError:
|
||||||
|
has_lib_cs = False
|
||||||
|
|
||||||
|
# import cloudstack common
|
||||||
|
from ansible.module_utils.cloudstack import *
|
||||||
|
|
||||||
|
|
||||||
|
class AnsibleCloudStackVmSnapshot(AnsibleCloudStack):
    """Manage VM snapshots: look up, create, remove and revert."""

    def __init__(self, module):
        AnsibleCloudStack.__init__(self, module)
        self.result = {
            'changed': False,
        }


    def get_snapshot(self):
        """Return the named snapshot of the VM, or None if absent."""
        args = {}
        args['virtualmachineid'] = self.get_vm_id()
        args['projectid'] = self.get_project_id()
        args['name'] = self.module.params.get('name')

        snapshots = self.cs.listVMSnapshot(**args)
        if snapshots:
            return snapshots['vmSnapshot'][0]
        return None


    def create_snapshot(self):
        """Create the snapshot if it does not exist; returns its dict."""
        snapshot = self.get_snapshot()
        if not snapshot:
            self.result['changed'] = True

            args = {}
            args['virtualmachineid'] = self.get_vm_id()
            args['name'] = self.module.params.get('name')
            args['description'] = self.module.params.get('description')
            args['snapshotmemory'] = self.module.params.get('snapshot_memory')

            if not self.module.check_mode:
                res = self.cs.createVMSnapshot(**args)

                # res exists only when the API was called; keep all uses
                # inside this branch so check mode cannot hit a NameError.
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

                if res and self.module.params.get('poll_async'):
                    snapshot = self._poll_job(res, 'vmsnapshot')
        return snapshot


    def remove_snapshot(self):
        """Delete the snapshot if present; returns the old snapshot dict."""
        snapshot = self.get_snapshot()
        if snapshot:
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.cs.deleteVMSnapshot(vmsnapshotid=snapshot['id'])

                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

                if res and self.module.params.get('poll_async'):
                    res = self._poll_job(res, 'vmsnapshot')
        return snapshot


    def revert_vm_to_snapshot(self):
        """Revert the VM to the named snapshot; fails if it is missing
        or not in the Ready state."""
        snapshot = self.get_snapshot()
        if snapshot:
            self.result['changed'] = True

            if snapshot['state'] != "Ready":
                self.module.fail_json(msg="snapshot state is '%s', not ready, could not revert VM" % snapshot['state'])

            if not self.module.check_mode:
                res = self.cs.revertToVMSnapshot(vmsnapshotid=snapshot['id'])

                if res and self.module.params.get('poll_async'):
                    res = self._poll_job(res, 'vmsnapshot')
            return snapshot

        self.module.fail_json(msg="snapshot not found, could not revert VM")


    def get_result(self, snapshot):
        """Copy the snapshot's fields of interest into the module result."""
        if snapshot:
            # table-driven copy replaces seven identical if-blocks
            for key in ('displayname', 'created', 'current', 'state',
                        'type', 'name', 'description'):
                if key in snapshot:
                    self.result[key] = snapshot[key]
        return self.result
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Module entry point: create, remove or revert a VM snapshot."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, aliases=['displayname']),
            vm=dict(required=True),
            description=dict(default=None),
            project=dict(default=None),
            zone=dict(default=None),
            snapshot_memory=dict(choices=BOOLEANS, default=False),
            state=dict(choices=['present', 'absent', 'revert'], default='present'),
            poll_async=dict(choices=BOOLEANS, default=True),
            api_key=dict(default=None),
            api_secret=dict(default=None),
            api_url=dict(default=None),
            api_http_method=dict(default='get'),
        ),
        supports_check_mode=True
    )

    if not has_lib_cs:
        module.fail_json(msg="python library cs required: pip install cs")

    try:
        acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module)

        state = module.params.get('state')
        if state == 'revert':
            snapshot = acs_vmsnapshot.revert_vm_to_snapshot()
        elif state == 'absent':
            snapshot = acs_vmsnapshot.remove_snapshot()
        else:
            snapshot = acs_vmsnapshot.create_snapshot()

        result = acs_vmsnapshot.get_result(snapshot)

    # was `except CloudStackException, e` — Python-2-only syntax
    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)


# import module snippets
from ansible.module_utils.basic import *
main()
|
@ -0,0 +1,208 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# Copyright 2015 Google Inc. All Rights Reserved.
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
"""An Ansible module to utilize GCE image resources."""
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: gce_img
|
||||||
|
version_added: "1.9"
|
||||||
|
short_description: utilize GCE image resources
|
||||||
|
description:
|
||||||
|
- This module can create and delete GCE private images from gzipped
|
||||||
|
compressed tarball containing raw disk data or from existing detached
|
||||||
|
disks in any zone. U(https://cloud.google.com/compute/docs/images)
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- the name of the image to create or delete
|
||||||
|
required: true
|
||||||
|
default: null
|
||||||
|
aliases: []
|
||||||
|
description:
|
||||||
|
description:
|
||||||
|
- an optional description
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
aliases: []
|
||||||
|
source:
|
||||||
|
description:
|
||||||
|
- the source disk or the Google Cloud Storage URI to create the image from
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
aliases: []
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- desired state of the image
|
||||||
|
required: false
|
||||||
|
default: "present"
|
||||||
|
choices: ["present", "absent"]
|
||||||
|
aliases: []
|
||||||
|
zone:
|
||||||
|
description:
|
||||||
|
- the zone of the disk specified by source
|
||||||
|
required: false
|
||||||
|
default: "us-central1-a"
|
||||||
|
aliases: []
|
||||||
|
service_account_email:
|
||||||
|
description:
|
||||||
|
- service account email
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
aliases: []
|
||||||
|
pem_file:
|
||||||
|
description:
|
||||||
|
- path to the pem file associated with the service account email
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
aliases: []
|
||||||
|
project_id:
|
||||||
|
description:
|
||||||
|
- your GCE project ID
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
aliases: []
|
||||||
|
|
||||||
|
requirements: [ "libcloud" ]
|
||||||
|
author: Peter Tan <ptan@google.com>
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Create an image named test-image from the disk 'test-disk' in zone us-central1-a.
|
||||||
|
- gce_img:
|
||||||
|
name: test-image
|
||||||
|
source: test-disk
|
||||||
|
zone: us-central1-a
|
||||||
|
state: present
|
||||||
|
|
||||||
|
# Create an image named test-image from a tarball in Google Cloud Storage.
|
||||||
|
- gce_img:
|
||||||
|
name: test-image
|
||||||
|
source: https://storage.googleapis.com/bucket/path/to/image.tgz
|
||||||
|
|
||||||
|
# Alternatively use the gs scheme
|
||||||
|
- gce_img:
|
||||||
|
name: test-image
|
||||||
|
source: gs://bucket/path/to/image.tgz
|
||||||
|
|
||||||
|
# Delete an image named test-image.
|
||||||
|
- gce_img:
|
||||||
|
name: test-image
|
||||||
|
state: absent
|
||||||
|
'''
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
try:
|
||||||
|
from libcloud.compute.types import Provider
|
||||||
|
from libcloud.compute.providers import get_driver
|
||||||
|
from libcloud.common.google import GoogleBaseError
|
||||||
|
from libcloud.common.google import ResourceExistsError
|
||||||
|
from libcloud.common.google import ResourceNotFoundError
|
||||||
|
_ = Provider.GCE
|
||||||
|
has_libcloud = True
|
||||||
|
except ImportError:
|
||||||
|
has_libcloud = False
|
||||||
|
|
||||||
|
|
||||||
|
GCS_URI = 'https://storage.googleapis.com/'
|
||||||
|
|
||||||
|
|
||||||
|
def create_image(gce, name, module):
    """Create an image with the specified name.

    The source may be a GCS URI (https or gs scheme) or the name of an
    existing detached disk in the configured zone.

    :return: True if the image was created, False if it already existed.
        Calls module.fail_json (which exits) on any other error.
    """
    source = module.params.get('source')
    zone = module.params.get('zone')
    desc = module.params.get('description')

    if not source:
        module.fail_json(msg='Must supply a source', changed=False)

    if source.startswith(GCS_URI):
        # source is a Google Cloud Storage URI
        volume = source
    elif source.startswith('gs://'):
        # libcloud only accepts https URI.
        volume = source.replace('gs://', GCS_URI)
    else:
        # source names an existing disk; resolve it in the given zone
        try:
            volume = gce.ex_get_volume(source, zone)
        except ResourceNotFoundError:
            module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
                             changed=False)
        # was `except GoogleBaseError, e` — Python-2-only syntax
        except GoogleBaseError as e:
            module.fail_json(msg=str(e), changed=False)

    try:
        gce.ex_create_image(name, volume, desc, False)
        return True
    except ResourceExistsError:
        # idempotent: an existing image of the same name is not a change
        return False
    except GoogleBaseError as e:
        module.fail_json(msg=str(e), changed=False)
|
||||||
|
|
||||||
|
|
||||||
|
def delete_image(gce, name, module):
    """Delete a specific image resource by name.

    :return: True on deletion, False if the image did not exist
        (idempotent). Calls module.fail_json on any other error.
    """
    try:
        gce.ex_delete_image(name)
        return True
    except ResourceNotFoundError:
        return False
    # was `except GoogleBaseError, e` — Python-2-only syntax
    except GoogleBaseError as e:
        module.fail_json(msg=str(e), changed=False)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Module entry point: create or delete a GCE private image."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            description=dict(),
            source=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            zone=dict(default='us-central1-a'),
            service_account_email=dict(),
            pem_file=dict(),
            project_id=dict(),
        )
    )

    if not has_libcloud:
        module.fail_json(msg='libcloud with GCE support is required.')

    gce = gce_connect(module)

    name = module.params.get('name')
    state = module.params.get('state')
    changed = False

    # user wants to create an image.
    if state == 'present':
        changed = create_image(gce, name, module)

    # user wants to delete the image.
    if state == 'absent':
        changed = delete_image(gce, name, module)

    # exit_json raises SystemExit; the original trailing sys.exit(0)
    # was unreachable dead code and has been dropped.
    module.exit_json(changed=changed, name=name)


# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *

main()
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,175 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# (c) 2015, Joseph Callen <jcallen () csc.com>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: vmware_datacenter
|
||||||
|
short_description: Manage VMware vSphere Datacenters
|
||||||
|
description:
|
||||||
|
- Manage VMware vSphere Datacenters
|
||||||
|
version_added: 2.0
|
||||||
|
author: Joseph Callen
|
||||||
|
notes:
|
||||||
|
- Tested on vSphere 5.5
|
||||||
|
requirements:
|
||||||
|
- PyVmomi
|
||||||
|
options:
|
||||||
|
hostname:
|
||||||
|
description:
|
||||||
|
- The hostname or IP address of the vSphere vCenter API server
|
||||||
|
required: True
|
||||||
|
username:
|
||||||
|
description:
|
||||||
|
- The username of the vSphere vCenter
|
||||||
|
required: True
|
||||||
|
aliases: ['user', 'admin']
|
||||||
|
password:
|
||||||
|
description:
|
||||||
|
- The password of the vSphere vCenter
|
||||||
|
required: True
|
||||||
|
aliases: ['pass', 'pwd']
|
||||||
|
datacenter_name:
|
||||||
|
description:
|
||||||
|
- The name of the datacenter the cluster will be created in.
|
||||||
|
required: True
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- If the datacenter should be present or absent
|
||||||
|
choices: ['present', 'absent']
|
||||||
|
required: True
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Example vmware_datacenter command from Ansible Playbooks
|
||||||
|
- name: Create Datacenter
|
||||||
|
local_action: >
|
||||||
|
vmware_datacenter
|
||||||
|
hostname="{{ ansible_ssh_host }}" username=root password=vmware
|
||||||
|
datacenter_name="datacenter"
|
||||||
|
'''
|
||||||
|
|
||||||
|
try:
|
||||||
|
from pyVmomi import vim, vmodl
|
||||||
|
HAS_PYVMOMI = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_PYVMOMI = False
|
||||||
|
|
||||||
|
|
||||||
|
def state_create_datacenter(module):
    """Create the datacenter named in the module params under the root folder.

    Exits the module via exit_json on success (changed=True) or fail_json on
    any vSphere fault. In check mode nothing is created, but changed=True is
    still reported and the result string is 'None'.
    """
    datacenter_name = module.params['datacenter_name']
    content = module.params['content']
    changed = True
    datacenter = None

    folder = content.rootFolder

    try:
        if not module.check_mode:
            datacenter = folder.CreateDatacenter(name=datacenter_name)
        module.exit_json(changed=changed, result=str(datacenter))
    except vim.fault.DuplicateName:
        module.fail_json(msg="A datacenter with the name %s already exists" % datacenter_name)
    except vim.fault.InvalidName:
        # BUGFIX: message previously said "cluster"; this module manages datacenters.
        module.fail_json(msg="%s is an invalid name for a datacenter" % datacenter_name)
    except vmodl.fault.NotSupported:
        # This should never happen
        module.fail_json(msg="Trying to create a datacenter on an incorrect folder object")
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
|
||||||
|
|
||||||
|
|
||||||
|
def check_datacenter_state(module):
    """Return 'present' or 'absent' for the datacenter named in module params.

    Side effects: stashes the service content (and, when found, the datacenter
    object) into module.params for the state handlers to reuse.
    """
    name = module.params['datacenter_name']

    try:
        content = connect_to_api(module)
        module.params['content'] = content
        datacenter = find_datacenter_by_name(content, name)

        if datacenter is None:
            return 'absent'

        module.params['datacenter'] = datacenter
        return 'present'
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
|
||||||
|
|
||||||
|
|
||||||
|
def state_destroy_datacenter(module):
    """Destroy the previously-located datacenter and report the task result.

    In check mode nothing is destroyed, but changed=True is still reported.
    """
    datacenter = module.params['datacenter']
    changed, result = True, None

    try:
        if not module.check_mode:
            destroy_task = datacenter.Destroy_Task()
            changed, result = wait_for_task(destroy_task)
        module.exit_json(changed=changed, result=result)
    except vim.fault.VimFault as vim_fault:
        module.fail_json(msg=vim_fault.msg)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
|
||||||
|
|
||||||
|
|
||||||
|
def state_exit_unchanged(module):
    """Report success with no changes made."""
    module.exit_json(changed=False)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Entry point: dispatch to the handler for (desired state, current state)."""

    # Base spec (hostname/username/password) comes from the shared vmware utils.
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            datacenter_name=dict(required=True, type='str'),
            state=dict(required=True, choices=['present', 'absent'], type='str'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    # Outer key: desired state; inner key: current state on the vCenter.
    datacenter_states = {
        'absent': {
            'present': state_destroy_datacenter,
            'absent': state_exit_unchanged,
        },
        'present': {
            'present': state_exit_unchanged,
            'absent': state_create_datacenter,
        }
    }
    desired_state = module.params['state']
    current_state = check_datacenter_state(module)

    datacenter_states[desired_state][current_state](module)
|
||||||
|
|
||||||
|
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
from ansible.module_utils.vmware import *
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
@ -0,0 +1,506 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
#
|
||||||
|
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
module: consul
|
||||||
|
short_description: "Add, modify & delete services within a consul cluster.
|
||||||
|
See http://consul.io for more details."
|
||||||
|
description:
|
||||||
|
- registers services and checks for an agent with a consul cluster. A service
|
||||||
|
is some process running on the agent node that should be advertised by
|
||||||
|
consul's discovery mechanism. It may optionally supply a check definition,
|
||||||
|
a periodic service test to notify the consul cluster of service's health.
|
||||||
|
Checks may also be registered per node e.g. disk usage, or cpu usage and
|
||||||
|
notify the health of the entire node to the cluster.
|
||||||
|
Service level checks do not require a check name or id as these are derived
|
||||||
|
by Consul from the Service name and id respectively by appending 'service:'.
|
||||||
|
Node level checks require a check_name and optionally a check_id.
|
||||||
|
Currently, there is no complete way to retrieve the script, interval or ttl
|
||||||
|
metadata for a registered check. Without this metadata it is not possible to
|
||||||
|
tell if the data supplied with ansible represents a change to a check. As a
|
||||||
|
result this does not attempt to determine changes and will always report a
|
||||||
|
changed occurred. An api method is planned to supply this metadata so at that
|
||||||
|
stage change management will be added.
|
||||||
|
requirements:
|
||||||
|
- python-consul
|
||||||
|
- requests
|
||||||
|
version_added: "1.9"
|
||||||
|
author: Steve Gargan (steve.gargan@gmail.com)
|
||||||
|
options:
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- register or deregister the consul service, defaults to present
|
||||||
|
required: true
|
||||||
|
choices: ['present', 'absent']
|
||||||
|
service_name:
|
||||||
|
description:
|
||||||
|
- Unique name for the service on a node, must be unique per node,
|
||||||
|
required if registering a service. May be omitted if registering
|
||||||
|
a node level check
|
||||||
|
required: false
|
||||||
|
service_id:
|
||||||
|
description:
|
||||||
|
- the ID for the service, must be unique per node, defaults to the
|
||||||
|
service name if the service name is supplied
|
||||||
|
required: false
|
||||||
|
default: service_name if supplied
|
||||||
|
host:
|
||||||
|
description:
|
||||||
|
- host of the consul agent defaults to localhost
|
||||||
|
required: false
|
||||||
|
default: localhost
|
||||||
|
port:
|
||||||
|
description:
|
||||||
|
- the port on which the consul agent is running
|
||||||
|
required: false
|
||||||
|
default: 8500
|
||||||
|
notes:
|
||||||
|
description:
|
||||||
|
- Notes to attach to check when registering it.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
service_port:
|
||||||
|
description:
|
||||||
|
- the port on which the service is listening required for
|
||||||
|
registration of a service, i.e. if service_name or service_id is set
|
||||||
|
required: false
|
||||||
|
tags:
|
||||||
|
description:
|
||||||
|
- a list of tags that will be attached to the service registration.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
script:
|
||||||
|
description:
|
||||||
|
- the script/command that will be run periodically to check the health
|
||||||
|
of the service. Scripts require an interval and vice versa
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
interval:
|
||||||
|
description:
|
||||||
|
- the interval at which the service check will be run. This is a number
|
||||||
|
with a s or m suffix to signify the units of seconds or minutes e.g
|
||||||
|
15s or 1m. If no suffix is supplied, m will be used by default e.g.
|
||||||
|
1 will be 1m. Required if the script param is specified.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
check_id:
|
||||||
|
description:
|
||||||
|
- an ID for the service check, defaults to the check name, ignored if
|
||||||
|
part of a service definition.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
check_name:
|
||||||
|
description:
|
||||||
|
- a name for the service check, defaults to the check id. required if
|
||||||
|
standalone, ignored if part of service definition.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
ttl:
|
||||||
|
description:
|
||||||
|
- checks can be registered with a ttl instead of a script and interval
|
||||||
|
this means that the service will check in with the agent before the
|
||||||
|
ttl expires. If it doesn't the check will be considered failed.
|
||||||
|
Required if registering a check and the script an interval are missing
|
||||||
|
Similar to the interval this is a number with a s or m suffix to
|
||||||
|
signify the units of seconds or minutes e.g 15s or 1m. If no suffix
|
||||||
|
is supplied, m will be used by default e.g. 1 will be 1m
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
token:
|
||||||
|
description:
|
||||||
|
- the token key identifying an ACL rule set. May be required to
|
||||||
|
register services.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
- name: register nginx service with the local consul agent
|
||||||
|
consul:
|
||||||
|
name: nginx
|
||||||
|
service_port: 80
|
||||||
|
|
||||||
|
- name: register nginx service with curl check
|
||||||
|
consul:
|
||||||
|
name: nginx
|
||||||
|
service_port: 80
|
||||||
|
script: "curl http://localhost"
|
||||||
|
interval: 60s
|
||||||
|
|
||||||
|
- name: register nginx with some service tags
|
||||||
|
consul:
|
||||||
|
name: nginx
|
||||||
|
service_port: 80
|
||||||
|
tags:
|
||||||
|
- prod
|
||||||
|
- webservers
|
||||||
|
|
||||||
|
- name: remove nginx service
|
||||||
|
consul:
|
||||||
|
name: nginx
|
||||||
|
state: absent
|
||||||
|
|
||||||
|
- name: create a node level check to test disk usage
|
||||||
|
consul:
|
||||||
|
check_name: Disk usage
|
||||||
|
check_id: disk_usage
|
||||||
|
script: "/opt/disk_usage.py"
|
||||||
|
interval: 5m
|
||||||
|
|
||||||
|
'''
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import urllib2
|
||||||
|
|
||||||
|
try:
|
||||||
|
import json
|
||||||
|
except ImportError:
|
||||||
|
import simplejson as json
|
||||||
|
|
||||||
|
try:
|
||||||
|
import consul
|
||||||
|
from requests.exceptions import ConnectionError
|
||||||
|
python_consul_installed = True
|
||||||
|
except ImportError, e:
|
||||||
|
python_consul_installed = False
|
||||||
|
|
||||||
|
def register_with_consul(module):
    """Dispatch to add() or remove() based on the requested state."""
    if module.params.get('state') == 'present':
        add(module)
    else:
        remove(module)
|
||||||
|
|
||||||
|
|
||||||
|
def add(module):
    ''' adds a service or a check depending on supplied configuration'''
    check = parse_check(module)
    service = parse_service(module)

    if not (service or check):
        module.fail_json(msg='a name and port are required to register a service')

    if not service:
        add_check(module, check)
        return

    if check:
        service.add_check(check)
    add_service(module, service)
|
||||||
|
|
||||||
|
|
||||||
|
def remove(module):
    ''' removes a service or a check '''
    service_id = module.params.get('service_id') or module.params.get('service_name')
    check_id = module.params.get('check_id') or module.params.get('check_name')

    if not (service_id or check_id):
        module.fail_json(msg='services and checks are removed by id or name.'
                             ' please supply a service id/name or a check id/name')

    if service_id:
        remove_service(module, service_id)
    else:
        remove_check(module, check_id)
|
||||||
|
|
||||||
|
|
||||||
|
def add_check(module, check):
    ''' registers a check with the given agent. currently there is no way
    retrieve the full metadata of an existing check through the consul api.
    Without this we can't compare to the supplied check and so we must assume
    a change. '''
    if not check.name:
        module.fail_json(msg='a check name is required for a node level check,'
                             ' one not attached to a service')

    check.register(get_consul_api(module))

    module.exit_json(changed=True,
                     check_id=check.check_id,
                     check_name=check.name,
                     script=check.script,
                     interval=check.interval,
                     ttl=check.ttl)
|
||||||
|
|
||||||
|
|
||||||
|
def remove_check(module, check_id):
    ''' removes a check using its id '''
    consul_api = get_consul_api(module)

    # deregister only when the agent actually knows the check
    if check_id in consul_api.agent.checks():
        consul_api.agent.check.deregister(check_id)
        module.exit_json(changed=True, id=check_id)

    module.exit_json(changed=False, id=check_id)
|
||||||
|
|
||||||
|
|
||||||
|
def add_service(module, service):
    ''' registers a service with the the current agent '''
    changed = False
    result = service

    consul_api = get_consul_api(module)
    existing = get_service_by_id(consul_api, service.id)

    # Check metadata can't be read back from consul, so any service carrying
    # checks must be (re)registered; otherwise only register on a difference.
    needs_registration = service.has_checks() or not existing or not existing == service
    if needs_registration:
        service.register(consul_api)
        # confirm the registration actually took
        registered = get_service_by_id(consul_api, service.id)
        if registered:
            result = registered
            changed = True

    module.exit_json(changed=changed,
                     service_id=result.id,
                     service_name=result.name,
                     service_port=result.port,
                     checks=[c.to_dict() for c in service.checks],
                     tags=result.tags)
|
||||||
|
|
||||||
|
|
||||||
|
def remove_service(module, service_id):
    ''' deregister a service from the given agent using its service id '''
    consul_api = get_consul_api(module)

    if get_service_by_id(consul_api, service_id):
        consul_api.agent.service.deregister(service_id)
        module.exit_json(changed=True, id=service_id)

    module.exit_json(changed=False, id=service_id)
|
||||||
|
|
||||||
|
|
||||||
|
def get_consul_api(module, token=None):
    """Build a consul client from the module params.

    BUGFIX: the ``token`` argument was previously accepted but silently
    ignored; an explicit token now takes precedence over the module's
    ``token`` parameter (falling back to it when omitted, as before).
    """
    if token is None:
        token = module.params.get('token')
    return consul.Consul(host=module.params.get('host'),
                         port=module.params.get('port'),
                         token=token)
|
||||||
|
|
||||||
|
|
||||||
|
def get_service_by_id(consul_api, service_id):
    ''' iterate the registered services and find one with the given id '''
    for service in consul_api.agent.services().values():
        if service['ID'] == service_id:
            return ConsulService(loaded=service)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_check(module):
    """Build a ConsulCheck from module params; None when no check is defined."""
    params = module.params

    # a check is driven by exactly one of script-and-interval or ttl
    if params.get('script') and params.get('ttl'):
        module.fail_json(
            msg='check are either script or ttl driven, supplying both does'
                ' not make sense')

    if not (params.get('check_id') or params.get('script') or params.get('ttl')):
        return None

    return ConsulCheck(
        params.get('check_id'),
        params.get('check_name'),
        params.get('check_node'),
        params.get('check_host'),
        params.get('script'),
        params.get('interval'),
        params.get('ttl'),
        params.get('notes')
    )
|
||||||
|
|
||||||
|
|
||||||
|
def parse_service(module):
    """Build a ConsulService from module params; None when none is defined."""
    name = module.params.get('service_name')
    port = module.params.get('service_port')

    if name and not port:
        module.fail_json(
            msg="service_name supplied but no service_port, a port is required"
                " to configure a service. Did you configure the 'port' "
                "argument meaning 'service_port'?")

    if name and port:
        return ConsulService(
            module.params.get('service_id'),
            name,
            port,
            module.params.get('tags'),
        )
|
||||||
|
|
||||||
|
|
||||||
|
class ConsulService():
    """In-memory representation of a consul service registration.

    Built either from explicit values or from a service dict as returned by
    the consul agent API (via ``loaded``).
    """

    def __init__(self, service_id=None, name=None, port=-1,
                 tags=None, loaded=None):
        # id defaults to the service name unless an explicit id is given
        self.id = self.name = name
        if service_id:
            self.id = service_id
        self.port = port
        self.tags = tags
        self.checks = []
        if loaded:
            self.id = loaded['ID']
            self.name = loaded['Service']
            self.port = loaded['Port']
            self.tags = loaded['Tags']

    def register(self, consul_api):
        """Register this service (and at most its first check) with the agent."""
        if len(self.checks) > 0:
            check = self.checks[0]
            consul_api.agent.service.register(
                self.name,
                service_id=self.id,
                port=self.port,
                tags=self.tags,
                script=check.script,
                interval=check.interval,
                ttl=check.ttl)
        else:
            consul_api.agent.service.register(
                self.name,
                service_id=self.id,
                port=self.port,
                tags=self.tags)

    def add_check(self, check):
        self.checks.append(check)

    # BUGFIX: removed the dead ``def checks(self)`` accessor. It was always
    # shadowed by the ``self.checks`` list assigned in __init__ and therefore
    # unreachable on every instance.

    def has_checks(self):
        return len(self.checks) > 0

    def __eq__(self, other):
        return (isinstance(other, self.__class__)
                and self.id == other.id
                and self.name == other.name
                and self.port == other.port
                and self.tags == other.tags)

    def __ne__(self, other):
        return not self.__eq__(other)

    def to_dict(self):
        """Return a dict of the service suitable for the module's JSON result."""
        data = {'id': self.id, "name": self.name}
        if self.port:
            data['port'] = self.port
        if self.tags and len(self.tags) > 0:
            data['tags'] = self.tags
        if len(self.checks) > 0:
            data['check'] = self.checks[0].to_dict()
        return data
|
||||||
|
|
||||||
|
|
||||||
|
class ConsulCheck():
    """In-memory representation of a consul health check (script- or ttl-driven)."""

    def __init__(self, check_id, name, node=None, host='localhost',
                 script=None, interval=None, ttl=None, notes=None):
        # check_id defaults to the name unless an explicit id is given
        self.check_id = self.name = name
        if check_id:
            self.check_id = check_id
        self.script = script
        self.interval = self.validate_duration('interval', interval)
        self.ttl = self.validate_duration('ttl', ttl)
        self.notes = notes
        self.node = node
        self.host = host

    def validate_duration(self, name, duration):
        """Validate that a duration carries one of consul's unit suffixes.

        NOTE(review): the module DOCUMENTATION claims a missing suffix
        defaults to minutes, but this raises instead — existing behavior kept.
        """
        if duration:
            duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
            if not any((duration.endswith(suffix) for suffix in duration_units)):
                raise Exception('Invalid %s %s you must specify units (%s)' %
                                (name, duration, ', '.join(duration_units)))
        return duration

    def register(self, consul_api):
        consul_api.agent.check.register(self.name, check_id=self.check_id,
                                        script=self.script,
                                        interval=self.interval,
                                        ttl=self.ttl, notes=self.notes)

    def __eq__(self, other):
        # BUGFIX: previously compared against unbound names ``script`` and
        # ``interval`` (NameError at first use); compare the other instance's
        # attributes instead.
        return (isinstance(other, self.__class__)
                and self.check_id == other.check_id
                and self.name == other.name
                and self.script == other.script
                and self.interval == other.interval)

    def __ne__(self, other):
        return not self.__eq__(other)

    def to_dict(self):
        data = {}
        self._add(data, 'id', attr='check_id')
        # BUGFIX: attr was 'check_name', which is not an attribute of this
        # class, so 'name' was silently dropped from the result.
        self._add(data, 'name')
        self._add(data, 'script')
        self._add(data, 'node')
        self._add(data, 'notes')
        self._add(data, 'host')
        self._add(data, 'interval')
        self._add(data, 'ttl')
        return data

    def _add(self, data, key, attr=None):
        """Copy self.<attr or key> into data[key]; skip attributes that don't exist."""
        try:
            if attr is None:
                attr = key
            data[key] = getattr(self, attr)
        except AttributeError:
            pass
|
||||||
|
|
||||||
|
def test_dependencies(module):
    """Fail the module run when python-consul is not importable."""
    if python_consul_installed:
        return
    module.fail_json(msg="python-consul required for this module. "
                         "see http://python-consul.readthedocs.org/en/latest/#installation")
|
||||||
|
|
||||||
|
def main():
    """Module entry point: parse arguments and register/deregister with consul."""
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(default='localhost'),
            port=dict(default=8500, type='int'),
            check_id=dict(required=False),
            check_name=dict(required=False),
            check_node=dict(required=False),
            check_host=dict(required=False),
            notes=dict(required=False),
            script=dict(required=False),
            service_id=dict(required=False),
            service_name=dict(required=False),
            service_port=dict(required=False, type='int'),
            state=dict(default='present', choices=['present', 'absent']),
            interval=dict(required=False, type='str'),
            ttl=dict(required=False, type='str'),
            tags=dict(required=False, type='list'),
            token=dict(required=False)
        ),
        supports_check_mode=False,
    )

    # fail fast when python-consul is missing
    test_dependencies(module)

    try:
        register_with_consul(module)
    except ConnectionError, e:
        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
            module.params.get('host'), module.params.get('port'), str(e)))
    except Exception, e:
        module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
# import module snippets
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
main()
|
@ -0,0 +1,320 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
#
|
||||||
|
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
module: consul_acl
|
||||||
|
short_description: "manipulate consul acl keys and rules"
|
||||||
|
description:
|
||||||
|
- allows the addition, modification and deletion of ACL keys and associated
|
||||||
|
rules in a consul cluster via the agent. For more details on using and
|
||||||
|
configuring ACLs, see https://www.consul.io/docs/internals/acl.html.
|
||||||
|
requirements:
|
||||||
|
- python-consul
|
||||||
|
- pyhcl
|
||||||
|
- requests
|
||||||
|
version_added: "1.9"
|
||||||
|
author: Steve Gargan (steve.gargan@gmail.com)
|
||||||
|
options:
|
||||||
|
mgmt_token:
|
||||||
|
description:
|
||||||
|
- a management token is required to manipulate the acl lists
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- whether the ACL pair should be present or absent, defaults to present
|
||||||
|
required: false
|
||||||
|
choices: ['present', 'absent']
|
||||||
|
type:
|
||||||
|
description:
|
||||||
|
- the type of token that should be created, either management or
|
||||||
|
client, defaults to client
|
||||||
|
choices: ['client', 'management']
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- the name that should be associated with the acl key, this is opaque
|
||||||
|
to Consul
|
||||||
|
required: false
|
||||||
|
token:
|
||||||
|
description:
|
||||||
|
- the token key identifying an ACL rule set. If generated by consul
|
||||||
|
this will be a UUID.
|
||||||
|
required: false
|
||||||
|
rules:
|
||||||
|
description:
|
||||||
|
- a list of the rules that should be associated with a given key/token.
|
||||||
|
required: false
|
||||||
|
host:
|
||||||
|
description:
|
||||||
|
- host of the consul agent defaults to localhost
|
||||||
|
required: false
|
||||||
|
default: localhost
|
||||||
|
port:
|
||||||
|
description:
|
||||||
|
- the port on which the consul agent is running
|
||||||
|
required: false
|
||||||
|
default: 8500
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
- name: create an acl token with rules
|
||||||
|
consul_acl:
|
||||||
|
mgmt_token: 'some_management_acl'
|
||||||
|
host: 'consul1.mycluster.io'
|
||||||
|
name: 'Foo access'
|
||||||
|
rules:
|
||||||
|
- key: 'foo'
|
||||||
|
policy: read
|
||||||
|
- key: 'private/foo'
|
||||||
|
policy: deny
|
||||||
|
|
||||||
|
- name: remove a token
|
||||||
|
consul_acl:
|
||||||
|
mgmt_token: 'some_management_acl'
|
||||||
|
host: 'consul1.mycluster.io'
|
||||||
|
token: '172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e'
|
||||||
|
state: absent
|
||||||
|
'''
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import urllib2
|
||||||
|
|
||||||
|
try:
|
||||||
|
import consul
|
||||||
|
from requests.exceptions import ConnectionError
|
||||||
|
python_consul_installed = True
|
||||||
|
except ImportError, e:
|
||||||
|
python_consul_installed = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
import hcl
|
||||||
|
pyhcl_installed = True
|
||||||
|
except ImportError:
|
||||||
|
pyhcl_installed = False
|
||||||
|
|
||||||
|
from requests.exceptions import ConnectionError
|
||||||
|
|
||||||
|
def execute(module):
    """Route to the ACL create/update or removal handler."""
    if module.params.get('state') == 'present':
        update_acl(module)
    else:
        remove_acl(module)
|
||||||
|
|
||||||
|
|
||||||
|
def update_acl(module):
    """Create a new ACL token, or update an existing one to match the
    supplied rules, then exit the module with the result."""
    rules = module.params.get('rules')
    token = module.params.get('token')
    token_type = module.params.get('token_type')
    mgmt = module.params.get('mgmt_token')
    name = module.params.get('name')
    consul = get_consul_api(module, mgmt)
    changed = False

    try:
        if token:
            # update path: only rewrite the acl when the rules differ
            # BUGFIX: removed leftover debug ``print`` statements here — an
            # Ansible module must emit only JSON on stdout.
            existing_rules = load_rules_for_token(module, consul, token)
            supplied_rules = yml_to_rules(module, rules)
            changed = not existing_rules == supplied_rules
            if changed:
                token = consul.acl.update(
                    token,
                    name=name,
                    type=token_type,
                    rules=supplied_rules.to_hcl())
        else:
            # create path
            try:
                rules = yml_to_rules(module, rules)
                if rules.are_rules():
                    rules = rules.to_json()
                else:
                    rules = None

                token = consul.acl.create(
                    name=name, type=token_type, rules=rules)
                changed = True
            except Exception as e:
                module.fail_json(
                    msg="No token returned, check your managment key and that \
                         the host is in the acl datacenter %s" % e)
    except Exception as e:
        module.fail_json(msg="Could not create/update acl %s" % e)

    module.exit_json(changed=changed,
                     token=token,
                     rules=rules,
                     name=name,
                     type=token_type)
|
||||||
|
|
||||||
|
|
||||||
|
def remove_acl(module):
    """Destroy the given ACL token when the cluster knows it."""
    token = module.params.get('token')
    mgmt = module.params.get('mgmt_token')

    consul = get_consul_api(module, token=mgmt)
    changed = token and consul.acl.info(token)
    if changed:
        token = consul.acl.destroy(token)

    module.exit_json(changed=changed, token=token)
|
||||||
|
|
||||||
|
|
||||||
|
def load_rules_for_token(module, consul_api, token):
    """Fetch a token's ACL info from consul and parse its HCL rule set into a
    Rules object. Fails the module on any error."""
    try:
        rules = Rules()
        info = consul_api.acl.info(token)
        if info and info['Rules']:
            rule_set = to_ascii(info['Rules'])
            for rule in hcl.loads(rule_set).values():
                for key, policy in rule.items():
                    rules.add_rule(Rule(key, policy['policy']))
        return rules
    except Exception as e:
        module.fail_json(
            msg="Could not load rule list from retrieved rule data %s, %s" % (
                token, e))
    # BUGFIX: removed the unreachable trailing ``return json_to_rules(module,
    # loaded)`` — both ``json_to_rules`` and ``loaded`` were undefined names.
|
||||||
|
|
||||||
|
def to_ascii(unicode_string):
    """Coerce a unicode object to a plain ascii str, dropping non-ascii chars;
    any other value is passed through untouched."""
    if not isinstance(unicode_string, unicode):
        return unicode_string
    return unicode_string.encode('ascii', 'ignore')
|
||||||
|
|
||||||
|
def yml_to_rules(module, yml_rules):
    """Convert the module's list-of-dicts rules into a Rules object.

    Each entry must carry both a 'key' and a 'policy'; anything else fails
    the module with a clear message.
    """
    rules = Rules()
    if yml_rules:
        for rule in yml_rules:
            # BUGFIX: the original tested ``'key' in rule or 'policy' in rule``,
            # which let a rule missing one of the two slip past the check and
            # crash with a KeyError below instead of failing cleanly.
            if not ('key' in rule and 'policy' in rule):
                module.fail_json(msg="a rule requires a key and a policy.")
            rules.add_rule(Rule(rule['key'], rule['policy']))
    return rules
|
||||||
|
|
||||||
|
template = '''key "%s" {
|
||||||
|
policy = "%s"
|
||||||
|
}'''
|
||||||
|
|
||||||
|
class Rules:
    """A collection of ACL rules keyed by each rule's key."""

    def __init__(self):
        self.rules = {}

    def add_rule(self, rule):
        self.rules[rule.key] = rule

    def are_rules(self):
        """True when at least one rule is present."""
        return len(self.rules) > 0

    def to_json(self):
        """Serialize as the JSON structure the consul ACL API expects."""
        rules = {}
        for key, rule in self.rules.items():
            rules[key] = {'policy': rule.policy}
        return json.dumps({'keys': rules})

    def to_hcl(self):
        """Serialize every rule through the module-level HCL template."""
        rules = ""
        for key, rule in self.rules.items():
            rules += template % (key, rule.policy)

        return to_ascii(rules)

    def __eq__(self, other):
        # BUGFIX: the original chained these guards with ``or`` inside the
        # ``not (...)``, so e.g. an empty Rules compared equal to any larger
        # one. All three conditions must hold before comparing rule by rule.
        if not (other and isinstance(other, self.__class__)
                and len(other.rules) == len(self.rules)):
            return False

        for name, other_rule in other.rules.items():
            if name not in self.rules:
                return False
            rule = self.rules[name]

            if not (rule and rule == other_rule):
                return False
        return True

    def __str__(self):
        return self.to_hcl()
|
||||||
|
|
||||||
|
class Rule:
    """A single ACL rule: a key prefix plus the policy applied to it."""

    def __init__(self, key, policy):
        self.key = key
        self.policy = policy

    def __eq__(self, other):
        # Equal only to another Rule with identical key and policy.
        if not isinstance(other, self.__class__):
            return False
        return self.key == other.key and self.policy == other.policy

    def __hash__(self):
        # Combine both fields so rules differing in either hash differently.
        return hash(self.key) ^ hash(self.policy)

    def __str__(self):
        return '%s %s' % (self.key, self.policy)
|
||||||
|
|
||||||
|
def get_consul_api(module, token=None):
    """Build a python-consul client from the module's host/port params.

    An explicit `token` argument takes precedence over the 'token' module
    parameter.
    """
    # FIX: the original read `token = token = module.params.get('token')` —
    # a harmless but confusing duplicated assignment.
    if not token:
        token = module.params.get('token')
    return consul.Consul(host=module.params.get('host'),
                         port=module.params.get('port'),
                         token=token)
|
||||||
|
|
||||||
|
def test_dependencies(module):
    """Abort the run with an install hint when an optional library is absent.

    Both python-consul and pyhcl are required by this module; the flags are
    set by the guarded imports at the top of the file.
    """
    if not python_consul_installed:
        module.fail_json(msg="python-consul required for this module. "
                             "see http://python-consul.readthedocs.org/en/latest/#installation")

    if not pyhcl_installed:
        module.fail_json(msg="pyhcl required for this module."
                             " see https://pypi.python.org/pypi/pyhcl")
|
||||||
|
|
||||||
|
def main():
    """Entry point: declare arguments, verify dependencies, run execute().

    Connection problems are reported with host/port context; any other
    exception is surfaced as the failure message.
    """
    argument_spec = dict(
        # SECURITY FIX: tokens are secrets — keep them out of logs/output.
        mgmt_token=dict(required=True, no_log=True),
        host=dict(default='localhost'),
        name=dict(required=False),
        port=dict(default=8500, type='int'),
        rules=dict(default=None, required=False, type='list'),
        state=dict(default='present', choices=['present', 'absent']),
        token=dict(required=False, no_log=True),
        token_type=dict(
            required=False, choices=['client', 'management'], default='client')
    )
    module = AnsibleModule(argument_spec, supports_check_mode=False)

    test_dependencies(module)

    try:
        execute(module)
    except ConnectionError as e:
        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
            module.params.get('host'), module.params.get('port'), str(e)))
    except Exception as e:
        module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
# import module snippets
# (star import is the conventional Ansible-1.x/2.0 way to pull in AnsibleModule)
from ansible.module_utils.basic import *

# run the module entry point on execution
main()
|
@ -0,0 +1,263 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
#
|
||||||
|
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
module: consul_kv
|
||||||
|
short_description: "manipulate entries in the key/value store of a consul
|
||||||
|
cluster. See http://www.consul.io/docs/agent/http.html#kv for more details."
|
||||||
|
description:
|
||||||
|
- allows the addition, modification and deletion of key/value entries in a
|
||||||
|
consul cluster via the agent. The entire contents of the record, including
|
||||||
|
    the indices, flags and session are returned as 'value'. If the key
    represents a prefix, each entry with that prefix is handled. Note that
    when a value is removed, the existing value, if any, is returned as part
    of the results.
|
||||||
|
requirements:
|
||||||
|
- python-consul
|
||||||
|
- requests
|
||||||
|
version_added: "1.9"
|
||||||
|
author: Steve Gargan (steve.gargan@gmail.com)
|
||||||
|
options:
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- the action to take with the supplied key and value. If the state is
|
||||||
|
'present', the key contents will be set to the value supplied,
|
||||||
|
'changed' will be set to true only if the value was different to the
|
||||||
|
current contents. The state 'absent' will remove the key/value pair,
|
||||||
|
again 'changed' will be set to true only if the key actually existed
|
||||||
|
prior to the removal. An attempt can be made to obtain or free the
|
||||||
|
lock associated with a key/value pair with the states 'acquire' or
|
||||||
|
'release' respectively. a valid session must be supplied to make the
|
||||||
|
attempt changed will be true if the attempt is successful, false
|
||||||
|
otherwise.
|
||||||
|
required: false
|
||||||
|
choices: ['present', 'absent', 'acquire', 'release']
|
||||||
|
default: present
|
||||||
|
key:
|
||||||
|
description:
|
||||||
|
- the key at which the value should be stored.
|
||||||
|
required: true
|
||||||
|
value:
|
||||||
|
description:
|
||||||
|
- the value should be associated with the given key, required if state
|
||||||
|
is present
|
||||||
|
required: true
|
||||||
|
recurse:
|
||||||
|
description:
|
||||||
|
- if the key represents a prefix, each entry with the prefix can be
|
||||||
|
retrieved by setting this to true.
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
session:
|
||||||
|
description:
|
||||||
|
- the session that should be used to acquire or release a lock
|
||||||
|
associated with a key/value pair
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
token:
|
||||||
|
description:
|
||||||
|
      - the token key identifying an ACL rule set that controls access to
|
||||||
|
the key value pair
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
cas:
|
||||||
|
description:
|
||||||
|
- used when acquiring a lock with a session. If the cas is 0, then
|
||||||
|
Consul will only put the key if it does not already exist. If the
|
||||||
|
cas value is non-zero, then the key is only set if the index matches
|
||||||
|
the ModifyIndex of that key.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
flags:
|
||||||
|
description:
|
||||||
|
- opaque integer value that can be passed when setting a value.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
host:
|
||||||
|
description:
|
||||||
|
- host of the consul agent defaults to localhost
|
||||||
|
required: false
|
||||||
|
default: localhost
|
||||||
|
port:
|
||||||
|
description:
|
||||||
|
- the port on which the consul agent is running
|
||||||
|
required: false
|
||||||
|
default: 8500
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
|
||||||
|
- name: add or update the value associated with a key in the key/value store
|
||||||
|
consul_kv:
|
||||||
|
key: somekey
|
||||||
|
value: somevalue
|
||||||
|
|
||||||
|
- name: remove a key from the store
|
||||||
|
consul_kv:
|
||||||
|
key: somekey
|
||||||
|
state: absent
|
||||||
|
|
||||||
|
- name: add a node to an arbitrary group via consul inventory (see consul.ini)
|
||||||
|
consul_kv:
|
||||||
|
key: ansible/groups/dc1/somenode
|
||||||
|
value: 'top_secret'
|
||||||
|
'''
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import urllib2
|
||||||
|
|
||||||
|
try:
|
||||||
|
import json
|
||||||
|
except ImportError:
|
||||||
|
import simplejson as json
|
||||||
|
|
||||||
|
try:
|
||||||
|
import consul
|
||||||
|
from requests.exceptions import ConnectionError
|
||||||
|
python_consul_installed = True
|
||||||
|
except ImportError, e:
|
||||||
|
python_consul_installed = False
|
||||||
|
|
||||||
|
from requests.exceptions import ConnectionError
|
||||||
|
|
||||||
|
def execute(module):
    """Dispatch to the handler matching the requested 'state'.

    'acquire'/'release' manage the lock on a key; 'present' sets the value;
    anything else ('absent') removes it.  Each handler exits the module run
    itself via exit_json/fail_json.
    """
    state = module.params.get('state')

    if state in ('acquire', 'release'):
        lock(module, state)
    if state == 'present':
        add_value(module)
    else:
        remove_value(module)
|
||||||
|
|
||||||
|
|
||||||
|
def lock(module, state):
    """Acquire or release the lock on a key using the supplied session.

    `state` is 'acquire' or 'release'; a session id is mandatory.  Exits the
    module run with changed=True only if consul reports the put succeeded.
    """
    # BUG FIX: consul_api was never created here, so every call raised
    # NameError before reaching the consul agent.
    consul_api = get_consul_api(module)

    session = module.params.get('session')
    key = module.params.get('key')
    value = module.params.get('value')

    if not session:
        # BUG FIX: AnsibleModule has no `fail` method — use fail_json.
        module.fail_json(
            msg='%s of lock for %s requested but no session supplied' %
            (state, key))

    # Same put in both cases; only the acquire/release keyword differs.
    put_kwargs = {'cas': module.params.get('cas'),
                  'flags': module.params.get('flags')}
    if state == 'acquire':
        put_kwargs['acquire'] = session
    else:
        put_kwargs['release'] = session
    successful = consul_api.kv.put(key, value, **put_kwargs)

    # BUG FIX: `index` was referenced but never defined; fetch the current
    # modify index for the key so callers get a meaningful value.
    index, _ = consul_api.kv.get(key)

    module.exit_json(changed=successful,
                     index=index,
                     key=key)
|
||||||
|
|
||||||
|
|
||||||
|
def add_value(module):
    """Store `value` at `key`; report changed only when contents differ.

    When the 'retrieve' parameter is set, the stored record is fetched back
    and returned as 'data'.
    """
    consul_api = get_consul_api(module)

    key = module.params.get('key')
    value = module.params.get('value')

    index, existing = consul_api.kv.get(key)

    changed = not existing or (existing and existing['Value'] != value)
    if changed and not module.check_mode:
        changed = consul_api.kv.put(key, value,
                                    cas=module.params.get('cas'),
                                    flags=module.params.get('flags'))

    # BUG FIX: `stored` was unbound (NameError at exit_json) whenever
    # retrieve was false — initialise it before the conditional fetch.
    stored = None
    if module.params.get('retrieve'):
        index, stored = consul_api.kv.get(key)

    module.exit_json(changed=changed,
                     index=index,
                     key=key,
                     data=stored)
|
||||||
|
|
||||||
|
|
||||||
|
def remove_value(module):
    ''' remove the value associated with the given key. if the recurse parameter
    is set then any key prefixed with the given key will be removed. '''
    consul_api = get_consul_api(module)

    key = module.params.get('key')
    # (removed an unused `value` local that was never read)

    index, existing = consul_api.kv.get(
        key, recurse=module.params.get('recurse'))

    # idiom fix: identity test against None rather than `!= None`
    changed = existing is not None
    if changed and not module.check_mode:
        consul_api.kv.delete(key, module.params.get('recurse'))

    # The previous value (if any) is handed back to the caller as 'data'.
    module.exit_json(changed=changed,
                     index=index,
                     key=key,
                     data=existing)
|
||||||
|
|
||||||
|
|
||||||
|
def get_consul_api(module, token=None):
    """Build a python-consul client from the module's host/port params.

    An explicit `token` argument takes precedence over the 'token' module
    parameter (consistent with the other consul modules).
    """
    # BUG FIX: the `token` argument was accepted but silently ignored —
    # the call always used module.params.get('token').
    if not token:
        token = module.params.get('token')
    return consul.Consul(host=module.params.get('host'),
                         port=module.params.get('port'),
                         token=token)
|
||||||
|
|
||||||
|
def test_dependencies(module):
    """Fail fast with an install hint if python-consul is not importable.

    The flag is set by the guarded import at the top of the file.
    """
    if python_consul_installed:
        return
    module.fail_json(msg="python-consul required for this module. "
                         "see http://python-consul.readthedocs.org/en/latest/#installation")
|
||||||
|
|
||||||
|
def main():
    """Entry point: declare arguments, check dependencies, run execute()."""
    argument_spec = dict(
        cas=dict(required=False),
        flags=dict(required=False),
        key=dict(required=True),
        host=dict(default='localhost'),
        port=dict(default=8500, type='int'),
        recurse=dict(required=False, type='bool'),
        # BUG FIX: retrieve was untyped, so a string "false" from a playbook
        # evaluated truthy — declare it a proper boolean.
        retrieve=dict(required=False, default=True, type='bool'),
        # BUG FIX: lock() reads the documented 'session' parameter but it was
        # never declared in the argument spec.
        session=dict(required=False),
        # BUG FIX: execute() dispatches on 'acquire'/'release' but the choices
        # list rejected them, making the lock feature unreachable.
        state=dict(default='present',
                   choices=['present', 'absent', 'acquire', 'release']),
        token=dict(required=False, default='anonymous', no_log=True),
        value=dict(required=False)
    )

    module = AnsibleModule(argument_spec, supports_check_mode=False)

    test_dependencies(module)

    try:
        execute(module)
    except ConnectionError as e:
        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
            module.params.get('host'), module.params.get('port'), str(e)))
    except Exception as e:
        module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
|
||||||
|
# import module snippets
# (star import is the conventional Ansible-1.x/2.0 way to pull in AnsibleModule)
from ansible.module_utils.basic import *

# run the module entry point on execution
main()
|
@ -0,0 +1,268 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
#
|
||||||
|
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
module: consul_session
|
||||||
|
short_description: "manipulate consul sessions"
|
||||||
|
description:
|
||||||
|
- allows the addition, modification and deletion of sessions in a consul
|
||||||
|
cluster. These sessions can then be used in conjunction with key value pairs
|
||||||
|
to implement distributed locks. In depth documentation for working with
|
||||||
|
sessions can be found here http://www.consul.io/docs/internals/sessions.html
|
||||||
|
requirements:
|
||||||
|
- python-consul
|
||||||
|
- requests
|
||||||
|
version_added: "1.9"
|
||||||
|
author: Steve Gargan (steve.gargan@gmail.com)
|
||||||
|
options:
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- whether the session should be present i.e. created if it doesn't
|
||||||
|
exist, or absent, removed if present. If created, the ID for the
|
||||||
|
session is returned in the output. If absent, the name or ID is
|
||||||
|
required to remove the session. Info for a single session, all the
|
||||||
|
sessions for a node or all available sessions can be retrieved by
|
||||||
|
specifying info, node or list for the state; for node or info, the
|
||||||
|
node name or session id is required as parameter.
|
||||||
|
required: false
|
||||||
|
choices: ['present', 'absent', 'info', 'node', 'list']
|
||||||
|
default: present
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- the name that should be associated with the session. This is opaque
|
||||||
|
to Consul and not required.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
delay:
|
||||||
|
description:
|
||||||
|
- the optional lock delay that can be attached to the session when it
|
||||||
|
        is created. Locks for invalidated sessions are blocked from being
|
||||||
|
acquired until this delay has expired. Valid units for delays
|
||||||
|
include 'ns', 'us', 'ms', 's', 'm', 'h'
|
||||||
|
default: 15s
|
||||||
|
required: false
|
||||||
|
node:
|
||||||
|
description:
|
||||||
|
      - the name of the node with which the session will be associated.
|
||||||
|
by default this is the name of the agent.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
datacenter:
|
||||||
|
description:
|
||||||
|
- name of the datacenter in which the session exists or should be
|
||||||
|
created.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
checks:
|
||||||
|
description:
|
||||||
|
- a list of checks that will be used to verify the session health. If
|
||||||
|
all the checks fail, the session will be invalidated and any locks
|
||||||
|
associated with the session will be release and can be acquired once
|
||||||
|
the associated lock delay has expired.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
host:
|
||||||
|
description:
|
||||||
|
- host of the consul agent defaults to localhost
|
||||||
|
required: false
|
||||||
|
default: localhost
|
||||||
|
port:
|
||||||
|
description:
|
||||||
|
- the port on which the consul agent is running
|
||||||
|
required: false
|
||||||
|
default: 8500
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
- name: register basic session with consul
|
||||||
|
consul_session:
|
||||||
|
name: session1
|
||||||
|
|
||||||
|
- name: register a session with an existing check
|
||||||
|
consul_session:
|
||||||
|
name: session_with_check
|
||||||
|
checks:
|
||||||
|
- existing_check_name
|
||||||
|
|
||||||
|
- name: register a session with lock_delay
|
||||||
|
consul_session:
|
||||||
|
name: session_with_delay
|
||||||
|
delay: 20s
|
||||||
|
|
||||||
|
- name: retrieve info about session by id
|
||||||
|
consul_session: id=session_id state=info
|
||||||
|
|
||||||
|
- name: retrieve active sessions
|
||||||
|
consul_session: state=list
|
||||||
|
'''
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import urllib2
|
||||||
|
|
||||||
|
try:
|
||||||
|
import consul
|
||||||
|
from requests.exceptions import ConnectionError
|
||||||
|
python_consul_installed = True
|
||||||
|
except ImportError, e:
|
||||||
|
python_consul_installed = False
|
||||||
|
|
||||||
|
def execute(module):
    """Invoke the handler matching the requested 'state'.

    'info'/'list'/'node' are read-only lookups; 'present' creates a session;
    'absent' destroys one.  Each handler exits the run itself.
    """
    state = module.params.get('state')

    if state in ('info', 'list', 'node'):
        lookup_sessions(module)
    elif state == 'present':
        update_session(module)
    else:
        remove_session(module)
|
||||||
|
|
||||||
|
def lookup_sessions(module):
    """Query session info: a single session ('info'), the sessions on a node
    ('node'), or every active session ('list'); exits with the results."""
    datacenter = module.params.get('datacenter')
    state = module.params.get('state')
    # FIX: renamed the local — the original `consul` shadowed the
    # module-level `consul` package import.
    consul_api = get_consul_api(module)
    try:
        if state == 'list':
            sessions_list = consul_api.session.list(dc=datacenter)
            # ditch the index, this can be grabbed from the results
            if sessions_list and sessions_list[1]:
                sessions_list = sessions_list[1]
            module.exit_json(changed=True,
                             sessions=sessions_list)
        elif state == 'node':
            node = module.params.get('node')
            if not node:
                module.fail_json(
                    msg="node name is required to retrieve sessions for node")
            sessions = consul_api.session.node(node, dc=datacenter)
            module.exit_json(changed=True,
                             node=node,
                             sessions=sessions)
        elif state == 'info':
            session_id = module.params.get('id')
            if not session_id:
                # FIX: corrected the "indvidual" typo in the error message.
                module.fail_json(
                    msg="session_id is required to retrieve individual session info")

            session_by_id = consul_api.session.info(session_id, dc=datacenter)
            module.exit_json(changed=True,
                             session_id=session_id,
                             sessions=session_by_id)

    except Exception as e:
        module.fail_json(msg="Could not retrieve session info %s" % e)
|
||||||
|
|
||||||
|
|
||||||
|
def update_session(module):
    """Create a consul session from the module params and exit with its id.

    Always reports changed=True since session.create always makes a new
    session; the lock delay is validated for a proper unit suffix first.
    """
    name = module.params.get('name')
    delay = module.params.get('delay')
    checks = module.params.get('checks')
    datacenter = module.params.get('datacenter')
    node = module.params.get('node')
    # (removed unused `session_id` and `changed` locals)

    consul_api = get_consul_api(module)

    try:
        session = consul_api.session.create(
            name=name,
            node=node,
            lock_delay=validate_duration('delay', delay),
            dc=datacenter,
            checks=checks
        )
        module.exit_json(changed=True,
                         session_id=session,
                         name=name,
                         delay=delay,
                         checks=checks,
                         node=node)
    except Exception as e:
        module.fail_json(msg="Could not create/update session %s" % e)
|
||||||
|
|
||||||
|
|
||||||
|
def remove_session(module):
    """Destroy the session named by the 'id' parameter and exit.

    A session id is mandatory; destroying a session is always reported as a
    change.
    """
    session_id = module.params.get('id')

    if not session_id:
        module.fail_json(msg="""A session id must be supplied in order to
    remove a session.""")

    consul_api = get_consul_api(module)
    # (removed unused `changed` local and the unused destroy() return binding)

    try:
        consul_api.session.destroy(session_id)

        module.exit_json(changed=True,
                         session_id=session_id)
    except Exception as e:
        module.fail_json(msg="Could not remove session with id '%s' %s" % (
            session_id, e))
|
||||||
|
|
||||||
|
def validate_duration(name, duration):
    """Validate that a non-empty duration string carries a time-unit suffix.

    Returns the duration unchanged (including falsy values such as None or
    ''); raises if a non-empty duration lacks a recognised unit.
    """
    if not duration:
        return duration
    duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
    # str.endswith accepts a tuple of candidate suffixes.
    if not duration.endswith(tuple(duration_units)):
        raise Exception('Invalid %s %s you must specify units (%s)' %
                        (name, duration, ', '.join(duration_units)))
    return duration
|
||||||
|
|
||||||
|
def get_consul_api(module):
    """Build a python-consul client from the host/port module params."""
    host = module.params.get('host')
    port = module.params.get('port')
    return consul.Consul(host=host, port=port)
|
||||||
|
|
||||||
|
def test_dependencies(module):
    # Fail fast with an install hint when python-consul could not be
    # imported; the flag is set by the guarded import at the top of the file.
    if not python_consul_installed:
        module.fail_json(msg="python-consul required for this module. "\
            "see http://python-consul.readthedocs.org/en/latest/#installation")
|
||||||
|
|
||||||
|
def main():
    """Module entry point: build the argument spec, verify the python-consul
    dependency, then hand off to execute()."""
    argument_spec = dict(
        checks=dict(default=None, required=False, type='list'),
        delay=dict(required=False, type='str', default='15s'),
        host=dict(default='localhost'),
        port=dict(default=8500, type='int'),
        id=dict(required=False),
        name=dict(required=False),
        node=dict(required=False),
        state=dict(default='present',
                   choices=['present', 'absent', 'info', 'node', 'list'])
    )

    module = AnsibleModule(argument_spec, supports_check_mode=False)

    test_dependencies(module)

    try:
        execute(module)
    except ConnectionError as e:
        # Connection problems get host/port context; everything else is
        # surfaced verbatim below.
        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
            module.params.get('host'), module.params.get('port'), str(e)))
    except Exception as e:
        module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
# import module snippets
# (star import is the conventional Ansible-1.x/2.0 way to pull in AnsibleModule)
from ansible.module_utils.basic import *

# run the module entry point on execution
main()
|
@ -0,0 +1,188 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: postgresql_ext
|
||||||
|
short_description: Add or remove PostgreSQL extensions from a database.
|
||||||
|
description:
|
||||||
|
- Add or remove PostgreSQL extensions from a database.
|
||||||
|
version_added: "0.1"
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- name of the extension to add or remove
|
||||||
|
required: true
|
||||||
|
default: null
|
||||||
|
db:
|
||||||
|
description:
|
||||||
|
- name of the database to add or remove the extension to/from
|
||||||
|
required: true
|
||||||
|
default: null
|
||||||
|
login_user:
|
||||||
|
description:
|
||||||
|
- The username used to authenticate with
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
login_password:
|
||||||
|
description:
|
||||||
|
- The password used to authenticate with
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
login_host:
|
||||||
|
description:
|
||||||
|
- Host running the database
|
||||||
|
required: false
|
||||||
|
default: localhost
|
||||||
|
port:
|
||||||
|
description:
|
||||||
|
- Database port to connect to.
|
||||||
|
required: false
|
||||||
|
default: 5432
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- The database extension state
|
||||||
|
required: false
|
||||||
|
default: present
|
||||||
|
choices: [ "present", "absent" ]
|
||||||
|
notes:
|
||||||
|
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
|
||||||
|
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
|
||||||
|
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
|
||||||
|
requirements: [ psycopg2 ]
|
||||||
|
author: Daniel Schep
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Adds postgis to the database "acme"
|
||||||
|
- postgresql_ext: name=postgis db=acme
|
||||||
|
'''
|
||||||
|
|
||||||
|
try:
|
||||||
|
import psycopg2
|
||||||
|
import psycopg2.extras
|
||||||
|
except ImportError:
|
||||||
|
postgresqldb_found = False
|
||||||
|
else:
|
||||||
|
postgresqldb_found = True
|
||||||
|
|
||||||
|
class NotSupportedError(Exception):
    """Raised for operations the connected PostgreSQL server cannot perform."""
|
||||||
|
|
||||||
|
|
||||||
|
# ===========================================
|
||||||
|
# PostgreSQL module specific support methods.
|
||||||
|
#
|
||||||
|
|
||||||
|
def ext_exists(cursor, ext):
    """Return True when extension `ext` is installed in the connected db."""
    query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
    cursor.execute(query, {'ext': ext})
    return cursor.rowcount == 1


def ext_delete(cursor, ext):
    """Drop the extension if present; return True when a DROP was issued."""
    if not ext_exists(cursor, ext):
        return False
    # NOTE(review): identifiers cannot be bound as query parameters, so the
    # name is interpolated directly — this assumes `ext` is trusted playbook
    # input, not end-user data.
    cursor.execute('DROP EXTENSION "%s"' % ext)
    return True


def ext_create(cursor, ext):
    """Create the extension if absent; return True when a CREATE was issued."""
    if ext_exists(cursor, ext):
        return False
    # NOTE(review): same identifier-interpolation caveat as ext_delete.
    cursor.execute('CREATE EXTENSION "%s"' % ext)
    return True
|
||||||
|
|
||||||
|
# ===========================================
|
||||||
|
# Module execution.
|
||||||
|
#
|
||||||
|
|
||||||
|
def main():
    """Create or drop a PostgreSQL extension according to the 'state' param.

    Connects with psycopg2 using the login_* parameters (empty values fall
    back to libpq defaults) and supports check mode.
    """
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default="postgres"),
            # SECURITY FIX: keep the password out of logs/output.
            login_password=dict(default="", no_log=True),
            login_host=dict(default=""),
            port=dict(default="5432"),
            db=dict(required=True),
            ext=dict(required=True, aliases=['name']),
            state=dict(default="present", choices=["absent", "present"]),
        ),
        supports_check_mode=True
    )

    if not postgresqldb_found:
        module.fail_json(msg="the python psycopg2 module is required")

    db = module.params["db"]
    ext = module.params["ext"]
    state = module.params["state"]
    changed = False
    # (removed an unused `port` local; the port is passed via params_map below)

    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw
    # dictionary
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port"
    }
    kw = dict((params_map[k], v) for (k, v) in module.params.items()
              if k in params_map and v != '')
    try:
        db_connection = psycopg2.connect(database=db, **kw)
        # Enable autocommit so we can create databases
        # NOTE(review): this is a lexicographic string compare, which would
        # misorder e.g. '2.10' vs '2.4.2' — confirm acceptable for the
        # psycopg2 versions in use.
        if psycopg2.__version__ >= '2.4.2':
            db_connection.autocommit = True
        else:
            db_connection.set_isolation_level(psycopg2
                                              .extensions
                                              .ISOLATION_LEVEL_AUTOCOMMIT)
        cursor = db_connection.cursor(
            cursor_factory=psycopg2.extras.DictCursor)
    except Exception as e:
        module.fail_json(msg="unable to connect to database: %s" % e)

    try:
        if module.check_mode:
            # BUG FIX: this previously called the undefined db_exists() and
            # had the predicates swapped.  'absent' would change things only
            # if the extension currently exists; 'present' only if it does
            # not.
            if state == "absent":
                changed = ext_exists(cursor, ext)
            elif state == "present":
                changed = not ext_exists(cursor, ext)
            module.exit_json(changed=changed, ext=ext)

        if state == "absent":
            changed = ext_delete(cursor, ext)

        elif state == "present":
            changed = ext_create(cursor, ext)
    except NotSupportedError as e:
        module.fail_json(msg=str(e))
    except Exception as e:
        module.fail_json(msg="Database query failed: %s" % e)

    module.exit_json(changed=changed, db=db)
|
||||||
|
|
||||||
|
# import module snippets
# (star import is the conventional Ansible-1.x/2.0 way to pull in AnsibleModule)
from ansible.module_utils.basic import *

# run the module entry point on execution
main()
|
||||||
|
|
@ -0,0 +1,256 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# (c) 2014, Jens Depuydt <http://www.jensd.be>
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: postgresql_lang
|
||||||
|
short_description: Adds, removes or changes procedural languages with a PostgreSQL database.
|
||||||
|
description:
|
||||||
|
- Adds, removes or changes procedural languages with a PostgreSQL database.
|
||||||
|
- This module allows you to add a language, remove a language or change the trust
|
||||||
|
relationship with a PostgreSQL database. The module can be used on the machine
|
||||||
|
where executed or on a remote host.
|
||||||
|
- When removing a language from a database, it is possible that dependencies prevent
|
||||||
|
the language from being removed. In that case, you can specify cascade to
|
||||||
|
automatically drop objects that depend on the language (such as functions in the
|
||||||
|
language). In case the language can't be deleted because it is required by the
|
||||||
|
database system, you can specify fail_on_drop=no to ignore the error.
|
||||||
|
- Be careful when marking a language as trusted since this could be a potential
|
||||||
|
security breach. Untrusted languages allow only users with the PostgreSQL superuser
|
||||||
|
privilege to use this language to create new functions.
|
||||||
|
version_added: "1.7"
|
||||||
|
options:
|
||||||
|
lang:
|
||||||
|
description:
|
||||||
|
- name of the procedural language to add, remove or change
|
||||||
|
required: true
|
||||||
|
default: null
|
||||||
|
trust:
|
||||||
|
description:
|
||||||
|
- make this language trusted for the selected db
|
||||||
|
required: false
|
||||||
|
default: no
|
||||||
|
choices: [ "yes", "no" ]
|
||||||
|
db:
|
||||||
|
description:
|
||||||
|
- name of database where the language will be added, removed or changed
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
force_trust:
|
||||||
|
description:
|
||||||
|
- marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
|
||||||
|
- use with care!
|
||||||
|
required: false
|
||||||
|
default: no
|
||||||
|
choices: [ "yes", "no" ]
|
||||||
|
fail_on_drop:
|
||||||
|
description:
|
||||||
|
- if C(yes), fail when removing a language. Otherwise just log and continue
|
||||||
|
- in some cases, it is not possible to remove a language (used by the db-system). When dependencies block the removal, consider using C(cascade).
|
||||||
|
required: false
|
||||||
|
default: 'yes'
|
||||||
|
choices: [ "yes", "no" ]
|
||||||
|
cascade:
|
||||||
|
description:
|
||||||
|
- when dropping a language, also delete object that depend on this language.
|
||||||
|
- only used when C(state=absent).
|
||||||
|
required: false
|
||||||
|
default: no
|
||||||
|
choices: [ "yes", "no" ]
|
||||||
|
port:
|
||||||
|
description:
|
||||||
|
- Database port to connect to.
|
||||||
|
required: false
|
||||||
|
default: 5432
|
||||||
|
login_user:
|
||||||
|
description:
|
||||||
|
- User used to authenticate with PostgreSQL
|
||||||
|
required: false
|
||||||
|
default: postgres
|
||||||
|
login_password:
|
||||||
|
description:
|
||||||
|
- Password used to authenticate with PostgreSQL (must match C(login_user))
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
login_host:
|
||||||
|
description:
|
||||||
|
- Host running PostgreSQL where you want to execute the actions.
|
||||||
|
required: false
|
||||||
|
default: localhost
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- The state of the language for the selected database
|
||||||
|
required: false
|
||||||
|
default: present
|
||||||
|
choices: [ "present", "absent" ]
|
||||||
|
notes:
|
||||||
|
- The default authentication assumes that you are either logging in as or
|
||||||
|
sudo'ing to the postgres account on the host.
|
||||||
|
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
|
||||||
|
ensure that psycopg2 is installed on the host before using this module. If
|
||||||
|
the remote host is the PostgreSQL server (which is the default case), then
|
||||||
|
PostgreSQL must also be installed on the remote host. For Ubuntu-based
|
||||||
|
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
|
||||||
|
on the remote host before using this module.
|
||||||
|
requirements: [ psycopg2 ]
|
||||||
|
author: Jens Depuydt
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Add language pltclu to database testdb if it doesn't exist:
|
||||||
|
- postgresql_lang db=testdb lang=pltclu state=present
|
||||||
|
|
||||||
|
# Add language pltclu to database testdb if it doesn't exist and mark it as trusted:
|
||||||
|
# Marks the language as trusted if it exists but isn't trusted yet
|
||||||
|
# force_trust makes sure that the language will be marked as trusted
|
||||||
|
- postgresql_lang db=testdb lang=pltclu state=present trust=yes force_trust=yes
|
||||||
|
|
||||||
|
# Remove language pltclu from database testdb:
|
||||||
|
- postgresql_lang: db=testdb lang=pltclu state=absent
|
||||||
|
|
||||||
|
# Remove language pltclu from database testdb and remove all dependencies:
|
||||||
|
- postgresql_lang: db=testdb lang=pltclu state=absent cascade=yes
|
||||||
|
|
||||||
|
# Remove language c from database testdb but ignore errors if something prevents the removal:
|
||||||
|
- postgresql_lang: db=testdb lang=pltclu state=absent fail_on_drop=no
|
||||||
|
'''
|
||||||
|
|
||||||
|
try:
|
||||||
|
import psycopg2
|
||||||
|
except ImportError:
|
||||||
|
postgresqldb_found = False
|
||||||
|
else:
|
||||||
|
postgresqldb_found = True
|
||||||
|
|
||||||
|
def lang_exists(cursor, lang):
    """Return True when procedural language *lang* is installed in the connected db.

    :param cursor: open psycopg2 cursor
    :param lang: language name to look up in pg_language
    """
    # Parameterized query instead of "%" string interpolation: the original
    # interpolated the language name straight into the SQL, which is an SQL
    # injection vector.
    query = "SELECT lanname FROM pg_language WHERE lanname = %s"
    cursor.execute(query, (lang,))
    return cursor.rowcount > 0
|
||||||
|
|
||||||
|
def lang_istrusted(cursor, lang):
    """Return the lanpltrusted flag of *lang* (True when the language is trusted).

    :param cursor: open psycopg2 cursor
    :param lang: language name to look up in pg_language
    """
    # Parameterized query instead of "%" string interpolation (SQL injection fix).
    query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %s"
    cursor.execute(query, (lang,))
    return cursor.fetchone()[0]
|
||||||
|
|
||||||
|
def lang_altertrust(cursor, lang, trust):
    """Flip the lanpltrusted flag of *lang* directly in pg_language.

    Always reports a change (returns True) because the UPDATE is issued
    unconditionally.
    """
    # Parameterized UPDATE; psycopg2 adapts both the boolean and the name.
    cursor.execute(
        "UPDATE pg_language SET lanpltrusted = %s WHERE lanname=%s",
        (trust, lang),
    )
    return True
|
||||||
|
|
||||||
|
def lang_add(cursor, lang, trust):
    """Install procedural language *lang* in the current database.

    When *trust* is truthy the language is created as TRUSTED. Always
    returns True (the CREATE is issued unconditionally).
    """
    # NOTE: identifiers cannot be parameterized, so the name is quoted inline.
    template = 'CREATE TRUSTED LANGUAGE "%s"' if trust else 'CREATE LANGUAGE "%s"'
    cursor.execute(template % lang)
    return True
|
||||||
|
|
||||||
|
def lang_drop(cursor, lang, cascade):
    """Drop procedural language *lang*; return True on success, False on failure.

    The DROP runs inside a savepoint so a failed drop (e.g. dependent objects
    without *cascade*) leaves the surrounding transaction usable.

    :param cascade: when truthy, also drop objects that depend on the language
    """
    cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
    try:
        if cascade:
            cursor.execute("DROP LANGUAGE \"%s\" CASCADE" % lang)
        else:
            cursor.execute("DROP LANGUAGE \"%s\"" % lang)
    # Narrowed from a bare "except:": the original also swallowed
    # KeyboardInterrupt/SystemExit, which should propagate.
    except Exception:
        cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
        cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
        return False
    cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
    return True
|
||||||
|
|
||||||
|
def main():
    """Ansible entry point: ensure a procedural language is present/absent in a db."""
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default="postgres"),
            login_password=dict(default=""),
            login_host=dict(default=""),
            db=dict(required=True),
            port=dict(default='5432'),
            lang=dict(required=True),
            state=dict(default="present", choices=["absent", "present"]),
            trust=dict(type='bool', default='no'),
            force_trust=dict(type='bool', default='no'),
            cascade=dict(type='bool', default='no'),
            fail_on_drop=dict(type='bool', default='yes'),
        ),
        supports_check_mode = True
    )

    db = module.params["db"]
    port = module.params["port"]
    lang = module.params["lang"]
    state = module.params["state"]
    trust = module.params["trust"]
    force_trust = module.params["force_trust"]
    cascade = module.params["cascade"]
    fail_on_drop = module.params["fail_on_drop"]

    if not postgresqldb_found:
        module.fail_json(msg="the python psycopg2 module is required")

    # Map module params onto psycopg2.connect() keyword arguments; empty
    # values are omitted so libpq defaults apply.
    params_map = {
        "login_host":"host",
        "login_user":"user",
        "login_password":"password",
        "port":"port",
        "db":"database"
    }
    # .items() instead of the Python-2-only .iteritems() so the module also
    # runs under a Python 3 interpreter.
    kw = dict( (params_map[k], v) for (k, v) in module.params.items()
              if k in params_map and v != "" )
    try:
        db_connection = psycopg2.connect(**kw)
        cursor = db_connection.cursor()
    # "except ... as e" is valid on Python 2.6+ and required on Python 3,
    # unlike the original "except Exception, e" form.
    except Exception as e:
        module.fail_json(msg="unable to connect to database: %s" % e)

    changed = False
    lang_dropped = False
    kw = dict(db=db,lang=lang,trust=trust)

    if state == "present":
        if lang_exists(cursor, lang):
            lang_trusted = lang_istrusted(cursor, lang)
            # Only touch the trust flag when it differs from the request.
            if (lang_trusted and not trust) or (not lang_trusted and trust):
                if module.check_mode:
                    changed = True
                else:
                    changed = lang_altertrust(cursor, lang, trust)
        else:
            if module.check_mode:
                changed = True
            else:
                changed = lang_add(cursor, lang, trust)
                # force_trust marks the language trusted even when
                # pg_pltemplate says otherwise.
                if force_trust:
                    changed = lang_altertrust(cursor, lang, trust)

    else:
        if lang_exists(cursor, lang):
            if module.check_mode:
                changed = True
                kw['lang_dropped'] = True
            else:
                changed = lang_drop(cursor, lang, cascade)
                if fail_on_drop and not changed:
                    msg = "unable to drop language, use cascade to delete dependencies or fail_on_drop=no to ignore"
                    module.fail_json(msg=msg)
                kw['lang_dropped'] = changed

    if changed:
        # check_mode must leave the database untouched, so roll back.
        if module.check_mode:
            db_connection.rollback()
        else:
            db_connection.commit()

    kw['changed'] = changed
    module.exit_json(**kw)
|
||||||
|
|
||||||
|
# import module snippets
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
main()
|
@ -0,0 +1,194 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
---
|
||||||
|
module: vertica_configuration
|
||||||
|
version_added: '2.0'
|
||||||
|
short_description: Updates Vertica configuration parameters.
|
||||||
|
description:
|
||||||
|
- Updates Vertica configuration parameters.
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Name of the parameter to update.
|
||||||
|
required: true
|
||||||
|
value:
|
||||||
|
description:
|
||||||
|
- Value of the parameter to be set.
|
||||||
|
required: true
|
||||||
|
db:
|
||||||
|
description:
|
||||||
|
- Name of the Vertica database.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
cluster:
|
||||||
|
description:
|
||||||
|
- Name of the Vertica cluster.
|
||||||
|
required: false
|
||||||
|
default: localhost
|
||||||
|
port:
|
||||||
|
description:
|
||||||
|
- Vertica cluster port to connect to.
|
||||||
|
required: false
|
||||||
|
default: 5433
|
||||||
|
login_user:
|
||||||
|
description:
|
||||||
|
- The username used to authenticate with.
|
||||||
|
required: false
|
||||||
|
default: dbadmin
|
||||||
|
login_password:
|
||||||
|
description:
|
||||||
|
- The password used to authenticate with.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
notes:
|
||||||
|
- The default authentication assumes that you are either logging in as or sudo'ing
|
||||||
|
to the C(dbadmin) account on the host.
|
||||||
|
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
|
||||||
|
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
|
||||||
|
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
|
||||||
|
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
|
||||||
|
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
|
||||||
|
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
|
||||||
|
requirements: [ 'unixODBC', 'pyodbc' ]
|
||||||
|
author: Dariusz Owczarek
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = """
|
||||||
|
- name: updating load_balance_policy
|
||||||
|
vertica_configuration: name=failovertostandbyafter value='8 hours'
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
import pyodbc
|
||||||
|
except ImportError:
|
||||||
|
pyodbc_found = False
|
||||||
|
else:
|
||||||
|
pyodbc_found = True
|
||||||
|
|
||||||
|
class NotSupportedError(Exception):
    """Raised when an operation is not supported by the target database."""
    pass
|
||||||
|
|
||||||
|
class CannotDropError(Exception):
    """Raised when an object cannot be dropped (e.g. dependencies exist)."""
    pass
|
||||||
|
|
||||||
|
# module specific functions
|
||||||
|
|
||||||
|
def get_configuration_facts(cursor, parameter_name=''):
    """Return cluster-wide configuration parameters as a dict keyed by
    lowercased parameter name; an empty *parameter_name* matches all."""
    cursor.execute("""
        select c.parameter_name, c.current_value, c.default_value
        from configuration_parameters c
        where c.node_name = 'ALL'
        and (? = '' or c.parameter_name ilike ?)
        """, parameter_name, parameter_name)
    facts = {}
    # Drain the cursor in batches of 100 rows.
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            facts[row.parameter_name.lower()] = {
                'parameter_name': row.parameter_name,
                'current_value': row.current_value,
                'default_value': row.default_value,
            }
        batch = cursor.fetchmany(100)
    return facts
|
||||||
|
|
||||||
|
def check(configuration_facts, parameter_name, current_value):
    """Return True when the live parameter already matches *current_value*.

    A falsy *current_value* (None/empty) always counts as matching —
    there is nothing requested to compare against.
    """
    key = parameter_name.lower()
    if not current_value:
        return True
    return current_value.lower() == configuration_facts[key]['current_value'].lower()
|
||||||
|
|
||||||
|
def present(configuration_facts, cursor, parameter_name, current_value):
    """Set the parameter to *current_value* when it differs; return changed flag.

    On change, *configuration_facts* is refreshed in place for the parameter.
    """
    key = parameter_name.lower()
    needs_update = bool(current_value) and \
        current_value.lower() != configuration_facts[key]['current_value'].lower()
    if needs_update:
        cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
        configuration_facts.update(get_configuration_facts(cursor, parameter_name))
    return needs_update
|
||||||
|
|
||||||
|
# module logic
|
||||||
|
|
||||||
|
def main():
    """Ansible entry point: connect over ODBC and set a Vertica config parameter."""
    module = AnsibleModule(
        argument_spec=dict(
            parameter=dict(required=True, aliases=['name']),
            value=dict(default=None),
            db=dict(default=None),
            cluster=dict(default='localhost'),
            port=dict(default='5433'),
            login_user=dict(default='dbadmin'),
            login_password=dict(default=None),
        ), supports_check_mode = True)

    if not pyodbc_found:
        module.fail_json(msg="The python pyodbc module is required.")

    parameter_name = module.params['parameter']
    current_value = module.params['value']
    db = ''
    if module.params['db']:
        db = module.params['db']

    changed = False

    try:
        dsn = (
            "Driver=Vertica;"
            "Server={0};"
            "Port={1};"
            "Database={2};"
            "User={3};"
            "Password={4};"
            "ConnectionLoadBalance={5}"
            ).format(module.params['cluster'], module.params['port'], db,
                module.params['login_user'], module.params['login_password'], 'true')
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    # "except ... as e" is valid on Python 2.6+ and Python 3, unlike the
    # original "except Exception, e" form.
    except Exception as e:
        module.fail_json(msg="Unable to connect to database: {0}.".format(e))

    try:
        configuration_facts = get_configuration_facts(cursor)
        if module.check_mode:
            changed = not check(configuration_facts, parameter_name, current_value)
        else:
            try:
                changed = present(configuration_facts, cursor, parameter_name, current_value)
            except pyodbc.Error as e:
                module.fail_json(msg=str(e))
    except NotSupportedError as e:
        module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts})
    except CannotDropError as e:
        module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts})
    except SystemExit:
        # avoid catching this on python 2.4
        raise
    except Exception as e:
        # str(e): passing the exception object itself is not JSON serializable.
        module.fail_json(msg=str(e))

    module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts})
|
||||||
|
|
||||||
|
# import ansible utilities
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
@ -0,0 +1,276 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
---
|
||||||
|
module: vertica_facts
|
||||||
|
version_added: '2.0'
|
||||||
|
short_description: Gathers Vertica database facts.
|
||||||
|
description:
|
||||||
|
- Gathers Vertica database facts.
|
||||||
|
options:
|
||||||
|
cluster:
|
||||||
|
description:
|
||||||
|
- Name of the cluster running the schema.
|
||||||
|
required: false
|
||||||
|
default: localhost
|
||||||
|
port:
|
||||||
|
description:
|
||||||
|
- Database port to connect to.
|
||||||
|
required: false
|
||||||
|
default: 5433
|
||||||
|
db:
|
||||||
|
description:
|
||||||
|
- Name of the database running the schema.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
login_user:
|
||||||
|
description:
|
||||||
|
- The username used to authenticate with.
|
||||||
|
required: false
|
||||||
|
default: dbadmin
|
||||||
|
login_password:
|
||||||
|
description:
|
||||||
|
- The password used to authenticate with.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
notes:
|
||||||
|
- The default authentication assumes that you are either logging in as or sudo'ing
|
||||||
|
to the C(dbadmin) account on the host.
|
||||||
|
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
|
||||||
|
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
|
||||||
|
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
|
||||||
|
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
|
||||||
|
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
|
||||||
|
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
|
||||||
|
requirements: [ 'unixODBC', 'pyodbc' ]
|
||||||
|
author: Dariusz Owczarek
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = """
|
||||||
|
- name: gathering vertica facts
|
||||||
|
vertica_facts: db=db_name
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
import pyodbc
|
||||||
|
except ImportError:
|
||||||
|
pyodbc_found = False
|
||||||
|
else:
|
||||||
|
pyodbc_found = True
|
||||||
|
|
||||||
|
class NotSupportedError(Exception):
    """Raised when an operation is not supported by the target database."""
    pass
|
||||||
|
|
||||||
|
# module specific functions
|
||||||
|
|
||||||
|
def get_schema_facts(cursor, schema=''):
    """Gather non-system schemas plus the roles granted usage/create on them.

    Returns a dict keyed by lowercased schema name; an empty *schema*
    matches all schemas.
    """
    cursor.execute("""
        select schema_name, schema_owner, create_time
        from schemata
        where not is_system_schema and schema_name not in ('public')
        and (? = '' or schema_name ilike ?)
        """, schema, schema)
    facts = {}
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            facts[row.schema_name.lower()] = {
                'name': row.schema_name,
                'owner': row.schema_owner,
                'create_time': str(row.create_time),
                'usage_roles': [],
                'create_roles': [],
            }
        batch = cursor.fetchmany(100)
    # Second pass: attach the roles holding USAGE/CREATE grants per schema.
    cursor.execute("""
        select g.object_name as schema_name, r.name as role_name,
        lower(g.privileges_description) privileges_description
        from roles r join grants g
        on g.grantee = r.name and g.object_type='SCHEMA'
        and g.privileges_description like '%USAGE%'
        and g.grantee not in ('public', 'dbadmin')
        and (? = '' or g.object_name ilike ?)
        """, schema, schema)
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            key = row.schema_name.lower()
            bucket = 'create_roles' if 'create' in row.privileges_description else 'usage_roles'
            facts[key][bucket].append(row.role_name)
        batch = cursor.fetchmany(100)
    return facts
|
||||||
|
|
||||||
|
def get_user_facts(cursor, user=''):
    """Gather facts about non-superuser accounts, keyed by lowercased user name.

    An empty *user* matches all users.
    """
    cursor.execute("""
        select u.user_name, u.is_locked, u.lock_time,
        p.password, p.acctexpired as is_expired,
        u.profile_name, u.resource_pool,
        u.all_roles, u.default_roles
        from users u join password_auditor p on p.user_id = u.user_id
        where not u.is_super_user
        and (? = '' or u.user_name ilike ?)
        """, user, user)
    facts = {}
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            entry = {
                'name': row.user_name,
                'locked': str(row.is_locked),
                'password': row.password,
                'expired': str(row.is_expired),
                'profile': row.profile_name,
                'resource_pool': row.resource_pool,
                'roles': [],
                'default_roles': [],
            }
            # lock_time is only meaningful for locked accounts.
            if row.is_locked:
                entry['locked_time'] = str(row.lock_time)
            # Role lists arrive as comma-separated strings, e.g. "r1, r2".
            if row.all_roles:
                entry['roles'] = row.all_roles.replace(' ', '').split(',')
            if row.default_roles:
                entry['default_roles'] = row.default_roles.replace(' ', '').split(',')
            facts[row.user_name.lower()] = entry
        batch = cursor.fetchmany(100)
    return facts
|
||||||
|
|
||||||
|
def get_role_facts(cursor, role=''):
    """Gather facts about roles and their assigned roles, keyed by lowercased
    role name; an empty *role* matches all roles."""
    cursor.execute("""
        select r.name, r.assigned_roles
        from roles r
        where (? = '' or r.name ilike ?)
        """, role, role)
    facts = {}
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            entry = {'name': row.name, 'assigned_roles': []}
            # assigned_roles arrives as a comma-separated string.
            if row.assigned_roles:
                entry['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
            facts[row.name.lower()] = entry
        batch = cursor.fetchmany(100)
    return facts
|
||||||
|
|
||||||
|
def get_configuration_facts(cursor, parameter=''):
    """Gather cluster-wide configuration parameters, keyed by lowercased
    parameter name; an empty *parameter* matches all parameters."""
    cursor.execute("""
        select c.parameter_name, c.current_value, c.default_value
        from configuration_parameters c
        where c.node_name = 'ALL'
        and (? = '' or c.parameter_name ilike ?)
        """, parameter, parameter)
    facts = {}
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            facts[row.parameter_name.lower()] = {
                'parameter_name': row.parameter_name,
                'current_value': row.current_value,
                'default_value': row.default_value,
            }
        batch = cursor.fetchmany(100)
    return facts
|
||||||
|
|
||||||
|
def get_node_facts(cursor, schema=''):
    """Gather per-node cluster facts, keyed by node address.

    The *schema* parameter is accepted for signature parity with the other
    fact gatherers but is not used by the query.
    """
    cursor.execute("""
        select node_name, node_address, export_address, node_state, node_type,
        catalog_path
        from nodes
        """)
    facts = {}
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            facts[row.node_address] = {
                'node_name': row.node_name,
                'export_address': row.export_address,
                'node_state': row.node_state,
                'node_type': row.node_type,
                'catalog_path': row.catalog_path,
            }
        batch = cursor.fetchmany(100)
    return facts
|
||||||
|
|
||||||
|
# module logic
|
||||||
|
|
||||||
|
def main():
    """Ansible entry point: connect over ODBC and gather Vertica facts."""
    module = AnsibleModule(
        argument_spec=dict(
            cluster=dict(default='localhost'),
            port=dict(default='5433'),
            db=dict(default=None),
            login_user=dict(default='dbadmin'),
            login_password=dict(default=None),
        ), supports_check_mode = True)

    if not pyodbc_found:
        module.fail_json(msg="The python pyodbc module is required.")

    db = ''
    if module.params['db']:
        db = module.params['db']

    changed = False

    try:
        dsn = (
            "Driver=Vertica;"
            "Server={0};"
            "Port={1};"
            "Database={2};"
            "User={3};"
            "Password={4};"
            "ConnectionLoadBalance={5}"
            ).format(module.params['cluster'], module.params['port'], db,
                module.params['login_user'], module.params['login_password'], 'true')
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    # "except ... as e" is valid on Python 2.6+ and Python 3, unlike the
    # original "except Exception, e" form.
    except Exception as e:
        module.fail_json(msg="Unable to connect to database: {0}.".format(e))

    try:
        schema_facts = get_schema_facts(cursor)
        user_facts = get_user_facts(cursor)
        role_facts = get_role_facts(cursor)
        configuration_facts = get_configuration_facts(cursor)
        node_facts = get_node_facts(cursor)
        module.exit_json(changed=False,
            ansible_facts={'vertica_schemas': schema_facts,
                           'vertica_users': user_facts,
                           'vertica_roles': role_facts,
                           'vertica_configuration': configuration_facts,
                           'vertica_nodes': node_facts})
    except NotSupportedError as e:
        module.fail_json(msg=str(e))
    except SystemExit:
        # avoid catching this on python 2.4
        raise
    except Exception as e:
        # str(e): passing the exception object itself is not JSON serializable.
        module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
# import ansible utilities
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
@ -0,0 +1,243 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
---
|
||||||
|
module: vertica_role
|
||||||
|
version_added: '2.0'
|
||||||
|
short_description: Adds or removes Vertica database roles and assigns roles to them.
|
||||||
|
description:
|
||||||
|
- Adds or removes Vertica database role and, optionally, assign other roles.
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Name of the role to add or remove.
|
||||||
|
required: true
|
||||||
|
assigned_roles:
|
||||||
|
description:
|
||||||
|
- Comma separated list of roles to assign to the role.
|
||||||
|
aliases: ['assigned_role']
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- Whether to create C(present), drop C(absent) or lock C(locked) a role.
|
||||||
|
required: false
|
||||||
|
choices: ['present', 'absent']
|
||||||
|
default: present
|
||||||
|
db:
|
||||||
|
description:
|
||||||
|
- Name of the Vertica database.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
cluster:
|
||||||
|
description:
|
||||||
|
- Name of the Vertica cluster.
|
||||||
|
required: false
|
||||||
|
default: localhost
|
||||||
|
port:
|
||||||
|
description:
|
||||||
|
- Vertica cluster port to connect to.
|
||||||
|
required: false
|
||||||
|
default: 5433
|
||||||
|
login_user:
|
||||||
|
description:
|
||||||
|
- The username used to authenticate with.
|
||||||
|
required: false
|
||||||
|
default: dbadmin
|
||||||
|
login_password:
|
||||||
|
description:
|
||||||
|
- The password used to authenticate with.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
notes:
|
||||||
|
- The default authentication assumes that you are either logging in as or sudo'ing
|
||||||
|
to the C(dbadmin) account on the host.
|
||||||
|
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
|
||||||
|
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
|
||||||
|
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
|
||||||
|
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
|
||||||
|
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
|
||||||
|
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
|
||||||
|
requirements: [ 'unixODBC', 'pyodbc' ]
|
||||||
|
author: Dariusz Owczarek
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = """
|
||||||
|
- name: creating a new vertica role
|
||||||
|
vertica_role: name=role_name db=db_name state=present
|
||||||
|
|
||||||
|
- name: creating a new vertica role with other role assigned
|
||||||
|
vertica_role: name=role_name assigned_role=other_role_name state=present
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
import pyodbc
|
||||||
|
except ImportError:
|
||||||
|
pyodbc_found = False
|
||||||
|
else:
|
||||||
|
pyodbc_found = True
|
||||||
|
|
||||||
|
class NotSupportedError(Exception):
    """Raised for requested changes this module cannot perform.

    Caught in main() so the failure is reported via module.fail_json
    together with the collected facts.
    """
    pass
|
||||||
|
|
||||||
|
class CannotDropError(Exception):
    """Raised when an object cannot be dropped (e.g. due to dependencies).

    Caught in main() so the failure is reported via module.fail_json
    together with the collected facts.
    """
    pass
|
||||||
|
|
||||||
|
# module specific functions
|
||||||
|
|
||||||
|
def get_role_facts(cursor, role=''):
    """Query Vertica for role facts.

    Returns a dict keyed by lower-cased role name; each value holds the
    original role name and the list of roles assigned to it. An empty
    *role* argument matches every role (the ilike filter is bypassed).
    """
    facts = {}
    cursor.execute("""
        select r.name, r.assigned_roles
        from roles r
        where (? = '' or r.name ilike ?)
    """, role, role)
    # Drain the result set in fixed-size batches.
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            assigned = []
            if row.assigned_roles:
                # assigned_roles is a comma-separated string, e.g. "r1, r2".
                assigned = row.assigned_roles.replace(' ', '').split(',')
            facts[row.name.lower()] = {
                'name': row.name,
                'assigned_roles': assigned}
        batch = cursor.fetchmany(100)
    return facts
|
||||||
|
|
||||||
|
def update_roles(role_facts, cursor, role,
                 existing, required):
    """Reconcile the roles assigned to *role*.

    Revokes assignments present in *existing* but not in *required*,
    then grants the ones in *required* missing from *existing*.
    """
    to_revoke = set(existing) - set(required)
    to_grant = set(required) - set(existing)
    for name in to_revoke:
        cursor.execute("revoke {0} from {1}".format(name, role))
    for name in to_grant:
        cursor.execute("grant {0} to {1}".format(name, role))
|
||||||
|
|
||||||
|
def check(role_facts, role, assigned_roles):
    """Return True if the role exists with the requested assignments.

    Used for check mode: True means no change would be made. The
    assigned-role comparison is order-insensitive; an empty
    *assigned_roles* list skips the comparison entirely.
    """
    role_key = role.lower()
    if role_key not in role_facts:
        return False
    # sorted(a) != sorted(b) replaces the removed-in-Python-3 cmp() idiom
    # cmp(sorted(a), sorted(b)) != 0 with identical semantics.
    if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']):
        return False
    return True
|
||||||
|
|
||||||
|
def present(role_facts, cursor, role, assigned_roles):
    """Ensure *role* exists with exactly *assigned_roles* assigned.

    Creates the role if missing, otherwise reconciles its assignments.
    Refreshes role_facts for the role after any change. Returns True
    if anything changed, False otherwise.
    """
    role_key = role.lower()
    if role_key not in role_facts:
        cursor.execute("create role {0}".format(role))
        update_roles(role_facts, cursor, role, [], assigned_roles)
        role_facts.update(get_role_facts(cursor, role))
        return True
    else:
        changed = False
        # sorted(a) != sorted(b) replaces the Python-2-only cmp() call;
        # the comparison stays order-insensitive.
        if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']):
            update_roles(role_facts, cursor, role,
                role_facts[role_key]['assigned_roles'], assigned_roles)
            changed = True
        if changed:
            role_facts.update(get_role_facts(cursor, role))
        return changed
|
||||||
|
|
||||||
|
def absent(role_facts, cursor, role, assigned_roles):
    """Drop *role* if it exists.

    Revokes all of its assigned roles first, drops it with cascade, and
    removes it from role_facts. Returns True if the role was dropped,
    False if it did not exist.
    """
    role_key = role.lower()
    if role_key not in role_facts:
        return False
    current = role_facts[role_key]
    update_roles(role_facts, cursor, role, current['assigned_roles'], [])
    cursor.execute("drop role {0} cascade".format(current['name']))
    del role_facts[role_key]
    return True
|
||||||
|
|
||||||
|
# module logic
|
||||||
|
|
||||||
|
def main():
    """Ansible entry point: create or drop a Vertica role.

    Connects via pyodbc/unixODBC, gathers role facts, then applies the
    requested state (or only reports in check mode). Exits through
    module.exit_json / module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            role=dict(required=True, aliases=['name']),
            assigned_roles=dict(default=None, aliases=['assigned_role']),
            state=dict(default='present', choices=['absent', 'present']),
            db=dict(default=None),
            cluster=dict(default='localhost'),
            port=dict(default='5433'),
            login_user=dict(default='dbadmin'),
            login_password=dict(default=None),
        ), supports_check_mode=True)

    if not pyodbc_found:
        module.fail_json(msg="The python pyodbc module is required.")

    role = module.params['role']
    assigned_roles = []
    if module.params['assigned_roles']:
        # Drop empty entries produced by stray commas; the list
        # comprehension behaves the same on Python 2 and 3 (filter()
        # returns a lazy iterator on Python 3).
        assigned_roles = [r for r in module.params['assigned_roles'].split(',') if r]
    state = module.params['state']
    db = ''
    if module.params['db']:
        db = module.params['db']

    changed = False

    try:
        dsn = (
            "Driver=Vertica;"
            "Server={0};"
            "Port={1};"
            "Database={2};"
            "User={3};"
            "Password={4};"
            "ConnectionLoadBalance={5}"
        ).format(module.params['cluster'], module.params['port'], db,
                 module.params['login_user'], module.params['login_password'], 'true')
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception as e:
        # "except X as e" replaces the Python-2-only comma syntax.
        module.fail_json(msg="Unable to connect to database: {0}.".format(e))

    try:
        role_facts = get_role_facts(cursor)
        if module.check_mode:
            changed = not check(role_facts, role, assigned_roles)
        elif state == 'absent':
            try:
                changed = absent(role_facts, cursor, role, assigned_roles)
            except pyodbc.Error as e:
                module.fail_json(msg=str(e))
        elif state == 'present':
            try:
                changed = present(role_facts, cursor, role, assigned_roles)
            except pyodbc.Error as e:
                module.fail_json(msg=str(e))
    except NotSupportedError as e:
        module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts})
    except CannotDropError as e:
        module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts})
    except SystemExit:
        # avoid catching this on python 2.4
        raise
    except Exception as e:
        # Stringify so the failure message serializes cleanly to JSON
        # (the original passed the exception object itself).
        module.fail_json(msg=str(e))

    module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
|
||||||
|
|
||||||
|
# import ansible utilities
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
@ -0,0 +1,317 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
---
|
||||||
|
module: vertica_schema
|
||||||
|
version_added: '2.0'
|
||||||
|
short_description: Adds or removes Vertica database schema and roles.
|
||||||
|
description:
|
||||||
|
- Adds or removes Vertica database schema and, optionally, roles
|
||||||
|
with schema access privileges.
|
||||||
|
- A schema will not be removed until all the objects have been dropped.
|
||||||
|
- In such a situation, if the module tries to remove the schema it
|
||||||
|
will fail and only remove roles created for the schema if they have
|
||||||
|
no dependencies.
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Name of the schema to add or remove.
|
||||||
|
required: true
|
||||||
|
usage_roles:
|
||||||
|
description:
|
||||||
|
- Comma separated list of roles to create and grant usage access to the schema.
|
||||||
|
aliases: ['usage_role']
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
create_roles:
|
||||||
|
description:
|
||||||
|
- Comma separated list of roles to create and grant usage and create access to the schema.
|
||||||
|
aliases: ['create_role']
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
owner:
|
||||||
|
description:
|
||||||
|
- Name of the user to set as owner of the schema.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- Whether to create C(present), or drop C(absent) a schema.
|
||||||
|
required: false
|
||||||
|
default: present
|
||||||
|
choices: ['present', 'absent']
|
||||||
|
db:
|
||||||
|
description:
|
||||||
|
- Name of the Vertica database.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
cluster:
|
||||||
|
description:
|
||||||
|
- Name of the Vertica cluster.
|
||||||
|
required: false
|
||||||
|
default: localhost
|
||||||
|
port:
|
||||||
|
description:
|
||||||
|
- Vertica cluster port to connect to.
|
||||||
|
required: false
|
||||||
|
default: 5433
|
||||||
|
login_user:
|
||||||
|
description:
|
||||||
|
- The username used to authenticate with.
|
||||||
|
required: false
|
||||||
|
default: dbadmin
|
||||||
|
login_password:
|
||||||
|
description:
|
||||||
|
- The password used to authenticate with.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
notes:
|
||||||
|
- The default authentication assumes that you are either logging in as or sudo'ing
|
||||||
|
to the C(dbadmin) account on the host.
|
||||||
|
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
|
||||||
|
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
|
||||||
|
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
|
||||||
|
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
|
||||||
|
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
|
||||||
|
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
|
||||||
|
requirements: [ 'unixODBC', 'pyodbc' ]
|
||||||
|
author: Dariusz Owczarek
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = """
|
||||||
|
- name: creating a new vertica schema
|
||||||
|
vertica_schema: name=schema_name db=db_name state=present
|
||||||
|
|
||||||
|
- name: creating a new schema with specific schema owner
|
||||||
|
vertica_schema: name=schema_name owner=dbowner db=db_name state=present
|
||||||
|
|
||||||
|
- name: creating a new schema with roles
|
||||||
|
vertica_schema:
|
||||||
|
name=schema_name
|
||||||
|
create_roles=schema_name_all
|
||||||
|
usage_roles=schema_name_ro,schema_name_rw
|
||||||
|
db=db_name
|
||||||
|
state=present
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
import pyodbc
|
||||||
|
except ImportError:
|
||||||
|
pyodbc_found = False
|
||||||
|
else:
|
||||||
|
pyodbc_found = True
|
||||||
|
|
||||||
|
class NotSupportedError(Exception):
    """Raised for requested changes this module cannot perform
    (e.g. changing the owner of an existing schema)."""
    pass
|
||||||
|
|
||||||
|
class CannotDropError(Exception):
    """Raised when dropping the schema fails because objects still
    depend on it."""
    pass
|
||||||
|
|
||||||
|
# module specific functions
|
||||||
|
|
||||||
|
def get_schema_facts(cursor, schema=''):
    """Query Vertica for schema facts.

    Returns a dict keyed by lower-cased schema name. Each entry carries
    the schema's name, owner, creation time, and the roles holding
    usage and create privileges on it. An empty *schema* argument
    matches every non-system schema.
    """
    def _batches():
        # Drain the pending result set in fixed-size chunks.
        batch = cursor.fetchmany(100)
        while batch:
            yield batch
            batch = cursor.fetchmany(100)

    facts = {}
    cursor.execute("""
        select schema_name, schema_owner, create_time
        from schemata
        where not is_system_schema and schema_name not in ('public', 'TxtIndex')
        and (? = '' or schema_name ilike ?)
    """, schema, schema)
    for batch in _batches():
        for row in batch:
            facts[row.schema_name.lower()] = {
                'name': row.schema_name,
                'owner': row.schema_owner,
                'create_time': str(row.create_time),
                'usage_roles': [],
                'create_roles': []}
    # Second pass: attach the roles granted usage/create on each schema.
    cursor.execute("""
        select g.object_name as schema_name, r.name as role_name,
        lower(g.privileges_description) privileges_description
        from roles r join grants g
        on g.grantee_id = r.role_id and g.object_type='SCHEMA'
        and g.privileges_description like '%USAGE%'
        and g.grantee not in ('public', 'dbadmin')
        and (? = '' or g.object_name ilike ?)
    """, schema, schema)
    for batch in _batches():
        for row in batch:
            key = row.schema_name.lower()
            bucket = 'create_roles' if 'create' in row.privileges_description else 'usage_roles'
            facts[key][bucket].append(row.role_name)
    return facts
|
||||||
|
|
||||||
|
def update_roles(schema_facts, cursor, schema,
                 existing, required,
                 create_existing, create_required):
    """Reconcile the usage/create roles of *schema*.

    Drops roles no longer wanted at all, demotes create-only roles that
    should keep nothing, creates-and-grants brand new roles, and grants
    create where newly required.
    """
    have = set(existing + create_existing)
    want = set(required + create_required)
    # Roles not wanted in any capacity are dropped outright.
    for name in have - want:
        cursor.execute("drop role {0} cascade".format(name))
    # Roles losing create privilege (but possibly keeping usage).
    for name in set(create_existing) - set(create_required):
        cursor.execute("revoke create on schema {0} from {1}".format(schema, name))
    # Brand new roles get created and receive usage.
    for name in want - have:
        cursor.execute("create role {0}".format(name))
        cursor.execute("grant usage on schema {0} to {1}".format(schema, name))
    # Roles gaining create privilege.
    for name in set(create_required) - set(create_existing):
        cursor.execute("grant create on schema {0} to {1}".format(schema, name))
|
||||||
|
|
||||||
|
def check(schema_facts, schema, usage_roles, create_roles, owner):
    """Return True if the schema already matches the requested config.

    Used for check mode: True means no change would be made. Role list
    comparisons are order-insensitive; *owner* is compared
    case-insensitively and only when provided.
    """
    schema_key = schema.lower()
    if schema_key not in schema_facts:
        return False
    # Bug fix: the original used '==' here, returning "change needed"
    # exactly when the owner already matched. present() uses '!=' for
    # the same comparison, which is the intended semantics.
    if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
        return False
    # sorted(a) != sorted(b) replaces the removed-in-Python-3 cmp() idiom.
    if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']):
        return False
    if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
        return False
    return True
|
||||||
|
|
||||||
|
def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
    """Ensure *schema* exists with the requested owner and role grants.

    Creates the schema (optionally with an authorization clause) if it
    is missing, otherwise reconciles its usage/create roles. Changing
    the owner of an existing schema raises NotSupportedError. Returns
    True if anything changed.
    """
    schema_key = schema.lower()
    if schema_key not in schema_facts:
        query_fragments = ["create schema {0}".format(schema)]
        if owner:
            query_fragments.append("authorization {0}".format(owner))
        cursor.execute(' '.join(query_fragments))
        update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles)
        schema_facts.update(get_schema_facts(cursor, schema))
        return True
    else:
        changed = False
        if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
            raise NotSupportedError((
                "Changing schema owner is not supported. "
                "Current owner: {0}."
            ).format(schema_facts[schema_key]['owner']))
        # sorted(a) != sorted(b) replaces the Python-2-only cmp() calls;
        # the comparison stays order-insensitive.
        if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \
           sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
            update_roles(schema_facts, cursor, schema,
                schema_facts[schema_key]['usage_roles'], usage_roles,
                schema_facts[schema_key]['create_roles'], create_roles)
            changed = True
        if changed:
            schema_facts.update(get_schema_facts(cursor, schema))
        return changed
|
||||||
|
|
||||||
|
def absent(schema_facts, cursor, schema, usage_roles, create_roles):
    """Drop *schema* if it exists.

    First strips all usage/create role grants, then drops the schema
    with 'restrict' — a drop failure (pyodbc.Error) is translated into
    CannotDropError. Returns True if the schema was dropped, False if
    it did not exist.
    """
    schema_key = schema.lower()
    if schema_key not in schema_facts:
        return False
    current = schema_facts[schema_key]
    update_roles(schema_facts, cursor, schema,
        current['usage_roles'], [], current['create_roles'], [])
    try:
        cursor.execute("drop schema {0} restrict".format(current['name']))
    except pyodbc.Error:
        raise CannotDropError("Dropping schema failed due to dependencies.")
    del schema_facts[schema_key]
    return True
|
||||||
|
|
||||||
|
# module logic
|
||||||
|
|
||||||
|
def main():
    """Ansible entry point: create or drop a Vertica schema.

    Connects via pyodbc/unixODBC, gathers schema facts, then applies
    the requested state (or only reports in check mode). Exits through
    module.exit_json / module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            schema=dict(required=True, aliases=['name']),
            usage_roles=dict(default=None, aliases=['usage_role']),
            create_roles=dict(default=None, aliases=['create_role']),
            owner=dict(default=None),
            state=dict(default='present', choices=['absent', 'present']),
            db=dict(default=None),
            cluster=dict(default='localhost'),
            port=dict(default='5433'),
            login_user=dict(default='dbadmin'),
            login_password=dict(default=None),
        ), supports_check_mode=True)

    if not pyodbc_found:
        module.fail_json(msg="The python pyodbc module is required.")

    schema = module.params['schema']
    usage_roles = []
    if module.params['usage_roles']:
        # Drop empty entries from stray commas; the list comprehension
        # behaves the same on Python 2 and 3 (filter() is lazy on 3).
        usage_roles = [r for r in module.params['usage_roles'].split(',') if r]
    create_roles = []
    if module.params['create_roles']:
        create_roles = [r for r in module.params['create_roles'].split(',') if r]
    owner = module.params['owner']
    state = module.params['state']
    db = ''
    if module.params['db']:
        db = module.params['db']

    changed = False

    try:
        dsn = (
            "Driver=Vertica;"
            "Server={0};"
            "Port={1};"
            "Database={2};"
            "User={3};"
            "Password={4};"
            "ConnectionLoadBalance={5}"
        ).format(module.params['cluster'], module.params['port'], db,
                 module.params['login_user'], module.params['login_password'], 'true')
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception as e:
        # "except X as e" replaces the Python-2-only comma syntax.
        module.fail_json(msg="Unable to connect to database: {0}.".format(e))

    try:
        schema_facts = get_schema_facts(cursor)
        if module.check_mode:
            changed = not check(schema_facts, schema, usage_roles, create_roles, owner)
        elif state == 'absent':
            try:
                changed = absent(schema_facts, cursor, schema, usage_roles, create_roles)
            except pyodbc.Error as e:
                module.fail_json(msg=str(e))
        elif state == 'present':
            try:
                changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner)
            except pyodbc.Error as e:
                module.fail_json(msg=str(e))
    except NotSupportedError as e:
        module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts})
    except CannotDropError as e:
        module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts})
    except SystemExit:
        # avoid catching this on python 2.4
        raise
    except Exception as e:
        # Stringify so the failure message serializes cleanly to JSON
        # (the original passed the exception object itself).
        module.fail_json(msg=str(e))

    module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts})
|
||||||
|
|
||||||
|
# import ansible utilities
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
@ -0,0 +1,385 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
---
|
||||||
|
module: vertica_user
|
||||||
|
version_added: '2.0'
|
||||||
|
short_description: Adds or removes Vertica database users and assigns roles.
|
||||||
|
description:
|
||||||
|
- Adds or removes Vertica database user and, optionally, assigns roles.
|
||||||
|
- A user will not be removed until all the dependencies have been dropped.
|
||||||
|
- In such a situation, if the module tries to remove the user it
|
||||||
|
will fail and only remove roles granted to the user.
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Name of the user to add or remove.
|
||||||
|
required: true
|
||||||
|
profile:
|
||||||
|
description:
|
||||||
|
- Sets the user's profile.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
resource_pool:
|
||||||
|
description:
|
||||||
|
- Sets the user's resource pool.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
password:
|
||||||
|
description:
|
||||||
|
- The user's password encrypted by the MD5 algorithm.
|
||||||
|
- The password must be generated with the format C("md5" + md5[password + username]),
|
||||||
|
resulting in a total of 35 characters. An easy way to do this is by querying
|
||||||
|
the Vertica database with select 'md5'||md5('<user_password><user_name>').
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
expired:
|
||||||
|
description:
|
||||||
|
- Sets the user's password expiration.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
ldap:
|
||||||
|
description:
|
||||||
|
- Set to true if users are authenticated via LDAP.
|
||||||
|
- The user will be created with password expired and set to I($ldap$).
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
roles:
|
||||||
|
description:
|
||||||
|
- Comma separated list of roles to assign to the user.
|
||||||
|
aliases: ['role']
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- Whether to create C(present), drop C(absent) or lock C(locked) a user.
|
||||||
|
required: false
|
||||||
|
choices: ['present', 'absent', 'locked']
|
||||||
|
default: present
|
||||||
|
db:
|
||||||
|
description:
|
||||||
|
- Name of the Vertica database.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
cluster:
|
||||||
|
description:
|
||||||
|
- Name of the Vertica cluster.
|
||||||
|
required: false
|
||||||
|
default: localhost
|
||||||
|
port:
|
||||||
|
description:
|
||||||
|
- Vertica cluster port to connect to.
|
||||||
|
required: false
|
||||||
|
default: 5433
|
||||||
|
login_user:
|
||||||
|
description:
|
||||||
|
- The username used to authenticate with.
|
||||||
|
required: false
|
||||||
|
default: dbadmin
|
||||||
|
login_password:
|
||||||
|
description:
|
||||||
|
- The password used to authenticate with.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
notes:
|
||||||
|
- The default authentication assumes that you are either logging in as or sudo'ing
|
||||||
|
to the C(dbadmin) account on the host.
|
||||||
|
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
|
||||||
|
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
|
||||||
|
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
|
||||||
|
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
|
||||||
|
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
|
||||||
|
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
|
||||||
|
requirements: [ 'unixODBC', 'pyodbc' ]
|
||||||
|
author: Dariusz Owczarek
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = """
|
||||||
|
- name: creating a new vertica user with password
|
||||||
|
vertica_user: name=user_name password=md5<encrypted_password> db=db_name state=present
|
||||||
|
|
||||||
|
- name: creating a new vertica user authenticated via ldap with roles assigned
|
||||||
|
vertica_user:
|
||||||
|
name=user_name
|
||||||
|
ldap=true
|
||||||
|
db=db_name
|
||||||
|
roles=schema_name_ro
|
||||||
|
state=present
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
import pyodbc
|
||||||
|
except ImportError:
|
||||||
|
pyodbc_found = False
|
||||||
|
else:
|
||||||
|
pyodbc_found = True
|
||||||
|
|
||||||
|
class NotSupportedError(Exception):
    """Raised for requested changes this module cannot perform
    (e.g. un-expiring an existing user's password)."""
    pass
|
||||||
|
|
||||||
|
class CannotDropError(Exception):
    """Raised when dropping the user fails because objects still
    depend on it."""
    pass
|
||||||
|
|
||||||
|
# module specific functions
|
||||||
|
|
||||||
|
def get_user_facts(cursor, user=''):
    """Query Vertica for user facts.

    Returns a dict keyed by lower-cased user name with lock state,
    password hash, expiry flag, profile, resource pool, and role
    assignments. Super users are excluded; an empty *user* argument
    matches every user.
    """
    def _split_csv(value):
        # Role columns hold comma-separated strings, e.g. "r1, r2".
        return value.replace(' ', '').split(',') if value else []

    facts = {}
    cursor.execute("""
        select u.user_name, u.is_locked, u.lock_time,
        p.password, p.acctexpired as is_expired,
        u.profile_name, u.resource_pool,
        u.all_roles, u.default_roles
        from users u join password_auditor p on p.user_id = u.user_id
        where not u.is_super_user
        and (? = '' or u.user_name ilike ?)
    """, user, user)
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            entry = {
                'name': row.user_name,
                'locked': str(row.is_locked),
                'password': row.password,
                'expired': str(row.is_expired),
                'profile': row.profile_name,
                'resource_pool': row.resource_pool,
                'roles': _split_csv(row.all_roles),
                'default_roles': _split_csv(row.default_roles)}
            if row.is_locked:
                # lock_time is only meaningful for locked accounts.
                entry['locked_time'] = str(row.lock_time)
            facts[row.user_name.lower()] = entry
        batch = cursor.fetchmany(100)
    return facts
|
||||||
|
|
||||||
|
def update_roles(user_facts, cursor, user,
                 existing_all, existing_default, required):
    """Reconcile the roles granted to *user*.

    Revokes roles no longer required, grants newly required ones, and
    (when any roles are required) resets the user's default role list
    to exactly *required*.
    """
    obsolete = list(set(existing_all) - set(required))
    missing = list(set(required) - set(existing_all))
    if obsolete:
        cursor.execute("revoke {0} from {1}".format(','.join(obsolete), user))
    if missing:
        cursor.execute("grant {0} to {1}".format(','.join(missing), user))
    if required:
        cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
|
||||||
|
|
||||||
|
def check(user_facts, user, profile, resource_pool,
          locked, password, expired, ldap, roles):
    """Return True if the user already matches the requested config.

    Used for check mode: True means no change would be made. None for
    profile/resource_pool/password/expired/ldap means "don't care";
    role comparison is order-insensitive and skipped for an empty list.
    NOTE(review): ldap is compared against the 'expired' fact, mirroring
    present(), where ldap users are created password-expired.
    """
    user_key = user.lower()
    if user_key not in user_facts:
        return False
    if profile and profile != user_facts[user_key]['profile']:
        return False
    if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
        return False
    if locked != (user_facts[user_key]['locked'] == 'True'):
        return False
    if password and password != user_facts[user_key]['password']:
        return False
    if expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or \
       ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True'):
        return False
    # sorted(a) != sorted(b) replaces the removed-in-Python-3 cmp() idiom.
    if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
                  sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
        return False
    return True
|
||||||
|
|
||||||
|
def present(user_facts, cursor, user, profile, resource_pool,
            locked, password, expired, ldap, roles):
    """Ensure *user* exists with the requested attributes and roles.

    Creates the user if missing, otherwise issues a single 'alter user'
    with the accumulated differences plus any resource-pool usage
    grant/revoke. ldap users are created/kept password-expired with the
    '$ldap$' sentinel password. Un-expiring a password raises
    NotSupportedError. Returns True if anything changed.
    """
    user_key = user.lower()
    if user_key not in user_facts:
        query_fragments = ["create user {0}".format(user)]
        if locked:
            query_fragments.append("account lock")
        if password or ldap:
            if password:
                query_fragments.append("identified by '{0}'".format(password))
            else:
                query_fragments.append("identified by '$ldap$'")
        if expired or ldap:
            query_fragments.append("password expire")
        if profile:
            query_fragments.append("profile {0}".format(profile))
        if resource_pool:
            query_fragments.append("resource pool {0}".format(resource_pool))
        cursor.execute(' '.join(query_fragments))
        if resource_pool and resource_pool != 'general':
            # Non-default pools additionally need an explicit usage grant.
            cursor.execute("grant usage on resource pool {0} to {1}".format(
                resource_pool, user))
        update_roles(user_facts, cursor, user, [], [], roles)
        user_facts.update(get_user_facts(cursor, user))
        return True
    else:
        changed = False
        query_fragments = ["alter user {0}".format(user)]
        if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
            state = 'lock' if locked else 'unlock'
            query_fragments.append("account {0}".format(state))
            changed = True
        if password and password != user_facts[user_key]['password']:
            query_fragments.append("identified by '{0}'".format(password))
            changed = True
        if ldap:
            if ldap != (user_facts[user_key]['expired'] == 'True'):
                query_fragments.append("password expire")
                changed = True
        elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'):
            if expired:
                query_fragments.append("password expire")
                changed = True
            else:
                raise NotSupportedError("Unexpiring user password is not supported.")
        if profile and profile != user_facts[user_key]['profile']:
            query_fragments.append("profile {0}".format(profile))
            changed = True
        if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
            query_fragments.append("resource pool {0}".format(resource_pool))
            # Move the explicit usage grant from the old pool to the new
            # one ('general' never needs one).
            if user_facts[user_key]['resource_pool'] != 'general':
                cursor.execute("revoke usage on resource pool {0} from {1}".format(
                    user_facts[user_key]['resource_pool'], user))
            if resource_pool != 'general':
                cursor.execute("grant usage on resource pool {0} to {1}".format(
                    resource_pool, user))
            changed = True
        if changed:
            cursor.execute(' '.join(query_fragments))
        # sorted(a) != sorted(b) replaces the Python-2-only cmp() calls;
        # the comparison stays order-insensitive.
        if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
                      sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
            update_roles(user_facts, cursor, user,
                user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
            changed = True
        if changed:
            user_facts.update(get_user_facts(cursor, user))
        return changed
|
||||||
|
|
||||||
|
def absent(user_facts, cursor, user, roles):
    """Drop *user* if it exists.

    Revokes all granted and default roles first, then drops the user —
    a drop failure (pyodbc.Error) is translated into CannotDropError.
    Returns True if the user was dropped, False if it did not exist.
    """
    user_key = user.lower()
    if user_key not in user_facts:
        return False
    current = user_facts[user_key]
    update_roles(user_facts, cursor, user,
        current['roles'], current['default_roles'], [])
    try:
        cursor.execute("drop user {0}".format(current['name']))
    except pyodbc.Error:
        raise CannotDropError("Dropping user failed due to dependencies.")
    del user_facts[user_key]
    return True
|
||||||
|
|
||||||
|
# module logic
|
||||||
|
|
||||||
|
def main():
|
||||||
|
|
||||||
|
module = AnsibleModule(
|
||||||
|
argument_spec=dict(
|
||||||
|
user=dict(required=True, aliases=['name']),
|
||||||
|
profile=dict(default=None),
|
||||||
|
resource_pool=dict(default=None),
|
||||||
|
password=dict(default=None),
|
||||||
|
expired=dict(type='bool', default=None),
|
||||||
|
ldap=dict(type='bool', default=None),
|
||||||
|
roles=dict(default=None, aliases=['role']),
|
||||||
|
state=dict(default='present', choices=['absent', 'present', 'locked']),
|
||||||
|
db=dict(default=None),
|
||||||
|
cluster=dict(default='localhost'),
|
||||||
|
port=dict(default='5433'),
|
||||||
|
login_user=dict(default='dbadmin'),
|
||||||
|
login_password=dict(default=None),
|
||||||
|
), supports_check_mode = True)
|
||||||
|
|
||||||
|
if not pyodbc_found:
|
||||||
|
module.fail_json(msg="The python pyodbc module is required.")
|
||||||
|
|
||||||
|
user = module.params['user']
|
||||||
|
profile = module.params['profile']
|
||||||
|
if profile:
|
||||||
|
profile = profile.lower()
|
||||||
|
resource_pool = module.params['resource_pool']
|
||||||
|
if resource_pool:
|
||||||
|
resource_pool = resource_pool.lower()
|
||||||
|
password = module.params['password']
|
||||||
|
expired = module.params['expired']
|
||||||
|
ldap = module.params['ldap']
|
||||||
|
roles = []
|
||||||
|
if module.params['roles']:
|
||||||
|
roles = module.params['roles'].split(',')
|
||||||
|
roles = filter(None, roles)
|
||||||
|
state = module.params['state']
|
||||||
|
if state == 'locked':
|
||||||
|
locked = True
|
||||||
|
else:
|
||||||
|
locked = False
|
||||||
|
db = ''
|
||||||
|
if module.params['db']:
|
||||||
|
db = module.params['db']
|
||||||
|
|
||||||
|
changed = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
dsn = (
|
||||||
|
"Driver=Vertica;"
|
||||||
|
"Server={0};"
|
||||||
|
"Port={1};"
|
||||||
|
"Database={2};"
|
||||||
|
"User={3};"
|
||||||
|
"Password={4};"
|
||||||
|
"ConnectionLoadBalance={5}"
|
||||||
|
).format(module.params['cluster'], module.params['port'], db,
|
||||||
|
module.params['login_user'], module.params['login_password'], 'true')
|
||||||
|
db_conn = pyodbc.connect(dsn, autocommit=True)
|
||||||
|
cursor = db_conn.cursor()
|
||||||
|
except Exception, e:
|
||||||
|
module.fail_json(msg="Unable to connect to database: {0}.".format(e))
|
||||||
|
|
||||||
|
try:
|
||||||
|
user_facts = get_user_facts(cursor)
|
||||||
|
if module.check_mode:
|
||||||
|
changed = not check(user_facts, user, profile, resource_pool,
|
||||||
|
locked, password, expired, ldap, roles)
|
||||||
|
elif state == 'absent':
|
||||||
|
try:
|
||||||
|
changed = absent(user_facts, cursor, user, roles)
|
||||||
|
except pyodbc.Error, e:
|
||||||
|
module.fail_json(msg=str(e))
|
||||||
|
elif state in ['present', 'locked']:
|
||||||
|
try:
|
||||||
|
changed = present(user_facts, cursor, user, profile, resource_pool,
|
||||||
|
locked, password, expired, ldap, roles)
|
||||||
|
except pyodbc.Error, e:
|
||||||
|
module.fail_json(msg=str(e))
|
||||||
|
except NotSupportedError, e:
|
||||||
|
module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts})
|
||||||
|
except CannotDropError, e:
|
||||||
|
module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts})
|
||||||
|
except SystemExit:
|
||||||
|
# avoid catching this on python 2.4
|
||||||
|
raise
|
||||||
|
except Exception, e:
|
||||||
|
module.fail_json(msg=e)
|
||||||
|
|
||||||
|
module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
|
||||||
|
|
||||||
|
# import ansible utilities
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
@ -0,0 +1,167 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# (c) 2012, Luis Alberto Perez Lazaro <luisperlazaro@gmail.com>
|
||||||
|
# (c) 2015, Jakub Jirutka <jakub@jirutka.cz>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: patch
|
||||||
|
author: Luis Alberto Perez Lazaro, Jakub Jirutka
|
||||||
|
version_added: 1.9
|
||||||
|
description:
|
||||||
|
- Apply patch files using the GNU patch tool.
|
||||||
|
short_description: Apply patch files using the GNU patch tool.
|
||||||
|
options:
|
||||||
|
basedir:
|
||||||
|
description:
|
||||||
|
- Path of a base directory in which the patch file will be applied.
|
||||||
|
May be omitted when C(dest) option is specified, otherwise required.
|
||||||
|
required: false
|
||||||
|
dest:
|
||||||
|
description:
|
||||||
|
- Path of the file on the remote machine to be patched.
|
||||||
|
- The names of the files to be patched are usually taken from the patch
|
||||||
|
file, but if there's just one file to be patched it can specified with
|
||||||
|
this option.
|
||||||
|
required: false
|
||||||
|
aliases: [ "originalfile" ]
|
||||||
|
src:
|
||||||
|
description:
|
||||||
|
- Path of the patch file as accepted by the GNU patch tool. If
|
||||||
|
C(remote_src) is False, the patch source file is looked up from the
|
||||||
|
module's "files" directory.
|
||||||
|
required: true
|
||||||
|
aliases: [ "patchfile" ]
|
||||||
|
remote_src:
|
||||||
|
description:
|
||||||
|
- If False, it will search for src at originating/master machine, if True it will
|
||||||
|
go to the remote/target machine for the src. Default is False.
|
||||||
|
choices: [ "True", "False" ]
|
||||||
|
required: false
|
||||||
|
default: "False"
|
||||||
|
strip:
|
||||||
|
description:
|
||||||
|
- Number that indicates the smallest prefix containing leading slashes
|
||||||
|
that will be stripped from each file name found in the patch file.
|
||||||
|
For more information see the strip parameter of the GNU patch tool.
|
||||||
|
required: false
|
||||||
|
type: "int"
|
||||||
|
default: "0"
|
||||||
|
note:
|
||||||
|
- This module requires GNU I(patch) utility to be installed on the remote host.
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
- name: apply patch to one file
|
||||||
|
patch: >
|
||||||
|
src=/tmp/index.html.patch
|
||||||
|
dest=/var/www/index.html
|
||||||
|
|
||||||
|
- name: apply patch to multiple files under basedir
|
||||||
|
patch: >
|
||||||
|
src=/tmp/customize.patch
|
||||||
|
basedir=/var/www
|
||||||
|
strip=1
|
||||||
|
'''
|
||||||
|
|
||||||
|
import os
|
||||||
|
from os import path, R_OK, W_OK
|
||||||
|
|
||||||
|
|
||||||
|
class PatchError(Exception):
    # Raised when the `patch` utility exits non-zero; the message carries
    # the tool's stderr (or stdout) so the failure is visible to the caller.
    pass
|
||||||
|
|
||||||
|
|
||||||
|
def is_already_applied(patch_func, patch_file, basedir, dest_file=None, strip=0):
    """Return True if the patch is already present.

    Runs a reverse dry-run via *patch_func* (a callable returning
    (rc, stdout, stderr)); exit code 0 means the patch would reverse
    cleanly, i.e. it has already been applied.
    """
    options = ['--quiet', '--reverse', '--forward', '--dry-run',
               "--strip=%s" % strip, "--directory='%s'" % basedir,
               "--input='%s'" % patch_file]
    if dest_file:
        options.append("'%s'" % dest_file)

    rc = patch_func(options)[0]
    return rc == 0
|
||||||
|
|
||||||
|
|
||||||
|
def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False):
    """Apply *patch_file* under *basedir* using *patch_func*.

    *patch_func* is a callable returning (rc, stdout, stderr).
    Raises PatchError with the tool's output when it exits non-zero.
    """
    options = ['--quiet', '--forward', '--batch', '--reject-file=-',
               "--strip=%s" % strip, "--directory='%s'" % basedir,
               "--input='%s'" % patch_file]
    if dry_run:
        options.append('--dry-run')
    if dest_file:
        options.append("'%s'" % dest_file)

    rc, out, err = patch_func(options)
    if rc != 0:
        # Prefer stderr; fall back to stdout when stderr is empty.
        raise PatchError(err or out)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
module = AnsibleModule(
|
||||||
|
argument_spec={
|
||||||
|
'src': {'required': True, 'aliases': ['patchfile']},
|
||||||
|
'dest': {'aliases': ['originalfile']},
|
||||||
|
'basedir': {},
|
||||||
|
'strip': {'default': 0, 'type': 'int'},
|
||||||
|
'remote_src': {'default': False, 'type': 'bool'},
|
||||||
|
},
|
||||||
|
required_one_of=[['dest', 'basedir']],
|
||||||
|
supports_check_mode=True
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create type object as namespace for module params
|
||||||
|
p = type('Params', (), module.params)
|
||||||
|
|
||||||
|
p.src = os.path.expanduser(p.src)
|
||||||
|
if not os.access(p.src, R_OK):
|
||||||
|
module.fail_json(msg="src %s doesn't exist or not readable" % (p.src))
|
||||||
|
|
||||||
|
if p.dest and not os.access(p.dest, W_OK):
|
||||||
|
module.fail_json(msg="dest %s doesn't exist or not writable" % (p.dest))
|
||||||
|
|
||||||
|
if p.basedir and not path.exists(p.basedir):
|
||||||
|
module.fail_json(msg="basedir %s doesn't exist" % (p.basedir))
|
||||||
|
|
||||||
|
if not p.basedir:
|
||||||
|
p.basedir = path.dirname(p.dest)
|
||||||
|
|
||||||
|
patch_bin = module.get_bin_path('patch')
|
||||||
|
if patch_bin is None:
|
||||||
|
module.fail_json(msg="patch command not found")
|
||||||
|
patch_func = lambda opts: module.run_command("%s %s" % (patch_bin, ' '.join(opts)))
|
||||||
|
|
||||||
|
# patch need an absolute file name
|
||||||
|
p.src = os.path.abspath(p.src)
|
||||||
|
|
||||||
|
changed = False
|
||||||
|
if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip):
|
||||||
|
try:
|
||||||
|
apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip,
|
||||||
|
dry_run=module.check_mode)
|
||||||
|
changed = True
|
||||||
|
except PatchError, e:
|
||||||
|
module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
module.exit_json(changed=changed)
|
||||||
|
|
||||||
|
# import module snippets
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
main()
|
@ -0,0 +1,451 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# (c) 2013-2014, Epic Games, Inc.
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
#
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: zabbix_host
|
||||||
|
short_description: Zabbix host creates/updates/deletes
|
||||||
|
description:
|
||||||
|
- This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
|
||||||
|
version_added: "2.0"
|
||||||
|
author: Tony Minfei Ding, Harrison Gu
|
||||||
|
requirements:
|
||||||
|
- zabbix-api python module
|
||||||
|
options:
|
||||||
|
server_url:
|
||||||
|
description:
|
||||||
|
- Url of Zabbix server, with protocol (http or https).
|
||||||
|
required: true
|
||||||
|
aliases: [ "url" ]
|
||||||
|
login_user:
|
||||||
|
description:
|
||||||
|
- Zabbix user name, used to authenticate against the server.
|
||||||
|
required: true
|
||||||
|
login_password:
|
||||||
|
description:
|
||||||
|
- Zabbix user password.
|
||||||
|
required: true
|
||||||
|
host_name:
|
||||||
|
description:
|
||||||
|
- Name of the host in Zabbix.
|
||||||
|
- host_name is the unique identifier used and cannot be updated using this module.
|
||||||
|
required: true
|
||||||
|
host_groups:
|
||||||
|
description:
|
||||||
|
- List of host groups the host is part of.
|
||||||
|
required: false
|
||||||
|
link_templates:
|
||||||
|
description:
|
||||||
|
- List of templates linked to the host.
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
status:
|
||||||
|
description:
|
||||||
|
- 'Monitoring status of the host. Possible values are: "enabled" and "disabled".'
|
||||||
|
required: false
|
||||||
|
default: "enabled"
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- 'Possible values are: "present" and "absent". If the host already exists, and the state is "present", it will just update the host if the associated data is different. "absent" will remove a host if it exists.'
|
||||||
|
required: false
|
||||||
|
default: "present"
|
||||||
|
timeout:
|
||||||
|
description:
|
||||||
|
- The timeout of API request(seconds).
|
||||||
|
default: 10
|
||||||
|
interfaces:
|
||||||
|
description:
|
||||||
|
- List of interfaces to be created for the host (see example below).
|
||||||
|
- 'Available values are: dns, ip, main, port, type and useip.'
|
||||||
|
- Please review the interface documentation for more information on the supported properties
|
||||||
|
- https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface
|
||||||
|
required: false
|
||||||
|
default: []
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
- name: Create a new host or update an existing host's info
|
||||||
|
local_action:
|
||||||
|
module: zabbix_host
|
||||||
|
server_url: http://monitor.example.com
|
||||||
|
login_user: username
|
||||||
|
login_password: password
|
||||||
|
host_name: ExampleHost
|
||||||
|
host_groups:
|
||||||
|
- Example group1
|
||||||
|
- Example group2
|
||||||
|
link_templates:
|
||||||
|
- Example template1
|
||||||
|
- Example template2
|
||||||
|
status: enabled
|
||||||
|
state: present
|
||||||
|
interfaces:
|
||||||
|
- type: 1
|
||||||
|
main: 1
|
||||||
|
useip: 1
|
||||||
|
ip: 10.xx.xx.xx
|
||||||
|
dns: ""
|
||||||
|
port: 10050
|
||||||
|
- type: 4
|
||||||
|
main: 1
|
||||||
|
useip: 1
|
||||||
|
ip: 10.xx.xx.xx
|
||||||
|
dns: ""
|
||||||
|
port: 12345
|
||||||
|
'''
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import copy
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
|
||||||
|
try:
|
||||||
|
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
|
||||||
|
|
||||||
|
HAS_ZABBIX_API = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_ZABBIX_API = False
|
||||||
|
|
||||||
|
|
||||||
|
# Extend the ZabbixAPI
|
||||||
|
# Since the zabbix-api python module too old (version 1.0, no higher version so far),
|
||||||
|
# it does not support the 'hostinterface' api calls,
|
||||||
|
# so we have to inherit the ZabbixAPI class to add 'hostinterface' support.
|
||||||
|
class ZabbixAPIExtends(ZabbixAPI):
    # Placeholder; replaced with a ZabbixAPISubClass proxy in __init__.
    hostinterface = None

    def __init__(self, server, timeout, **kwargs):
        # Delegate connection setup to the base class, then attach the
        # 'hostinterface' sub-API that zabbix-api 1.0 does not provide.
        ZabbixAPI.__init__(self, server, timeout=timeout)
        self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))
|
||||||
|
|
||||||
|
|
||||||
|
class Host(object):
    """Wraps Zabbix API calls for host create/update/delete and the
    comparisons needed to decide whether an update is required.

    All failures are reported through module.fail_json, which exits the
    process; methods therefore do not return error values.
    """
    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx

    # exist host
    def is_host_exist(self, host_name):
        # Returns the raw host.exists API result (truthy when found).
        result = self._zapi.host.exists({'host': host_name})
        return result

    # check if host group exists
    def check_host_group_exist(self, group_names):
        # Fails the module on the first missing group; True otherwise.
        for group_name in group_names:
            result = self._zapi.hostgroup.exists({'name': group_name})
            if not result:
                self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
        return True

    def get_template_ids(self, template_list):
        # Resolve template names to template ids, failing on any miss.
        # NOTE(review): the loop variable reuses the name 'template_list'
        # for the API result, shadowing the parameter being iterated.
        template_ids = []
        if template_list is None or len(template_list) == 0:
            return template_ids
        for template in template_list:
            template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
            if len(template_list) < 1:
                self._module.fail_json(msg="Template not found: %s" % template)
            else:
                template_id = template_list[0]['templateid']
                template_ids.append(template_id)
        return template_ids

    def add_host(self, host_name, group_ids, status, interfaces):
        # Create the host; in check mode exits early reporting a change.
        # Returns the new host id on success.
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            host_list = self._zapi.host.create({'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status})
            if len(host_list) >= 1:
                return host_list['hostids'][0]
        except Exception, e:
            self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))

    def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list):
        # Update host groups/status, then reconcile interfaces:
        # matching type -> update in place; no match -> create;
        # leftover existing interfaces -> delete.
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update({'hostid': host_id, 'groups': group_ids, 'status': status})
            # NOTE(review): this is an alias, not a copy — removals below
            # also mutate the caller's exist_interface_list.
            interface_list_copy = exist_interface_list
            if interfaces:
                for interface in interfaces:
                    flag = False
                    interface_str = interface
                    for exist_interface in exist_interface_list:
                        interface_type = interface['type']
                        exist_interface_type = int(exist_interface['type'])
                        if interface_type == exist_interface_type:
                            # update
                            interface_str['interfaceid'] = exist_interface['interfaceid']
                            self._zapi.hostinterface.update(interface_str)
                            flag = True
                            interface_list_copy.remove(exist_interface)
                            break
                    if not flag:
                        # add
                        interface_str['hostid'] = host_id
                        self._zapi.hostinterface.create(interface_str)
                # remove
                remove_interface_ids = []
                for remove_interface in interface_list_copy:
                    interface_id = remove_interface['interfaceid']
                    remove_interface_ids.append(interface_id)
                if len(remove_interface_ids) > 0:
                    self._zapi.hostinterface.delete(remove_interface_ids)
        except Exception, e:
            self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))

    def delete_host(self, host_id, host_name):
        # Remove the host; in check mode exits early reporting a change.
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.delete({'hostid': host_id})
        except Exception, e:
            self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))

    # get host by host name
    def get_host_by_host_name(self, host_name):
        # Returns the first matching host dict; fails if none found.
        host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}})
        if len(host_list) < 1:
            self._module.fail_json(msg="Host not found: %s" % host_name)
        else:
            return host_list[0]

    # get group ids by group names
    def get_group_ids_by_group_names(self, group_names):
        # Returns [{'groupid': id}, ...] in the shape host.create expects.
        group_ids = []
        if self.check_host_group_exist(group_names):
            group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}})
            for group in group_list:
                group_id = group['groupid']
                group_ids.append({'groupid': group_id})
        return group_ids

    # get host templates by host id
    def get_host_templates_by_host_id(self, host_id):
        # Template ids currently linked to the host.
        template_ids = []
        template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
        for template in template_list:
            template_ids.append(template['templateid'])
        return template_ids

    # get host groups by host id
    def get_host_groups_by_host_id(self, host_id):
        # Group names (not ids) the host currently belongs to.
        exist_host_groups = []
        host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id})

        if len(host_groups_list) >= 1:
            for host_groups_name in host_groups_list:
                exist_host_groups.append(host_groups_name['name'])
        return exist_host_groups

    # check the exist_interfaces whether it equals the interfaces or not
    def check_interface_properties(self, exist_interface_list, interfaces):
        # Returns True when the desired interfaces differ from the
        # existing ones: first compare the port sets, then, for matching
        # ports, compare every property string-wise.
        interfaces_port_list = []
        if len(interfaces) >= 1:
            for interface in interfaces:
                interfaces_port_list.append(int(interface['port']))

        exist_interface_ports = []
        if len(exist_interface_list) >= 1:
            for exist_interface in exist_interface_list:
                exist_interface_ports.append(int(exist_interface['port']))

        if set(interfaces_port_list) != set(exist_interface_ports):
            return True

        for exist_interface in exist_interface_list:
            exit_interface_port = int(exist_interface['port'])
            for interface in interfaces:
                interface_port = int(interface['port'])
                if interface_port == exit_interface_port:
                    for key in interface.keys():
                        if str(exist_interface[key]) != str(interface[key]):
                            return True

        return False

    # get the status of host by host
    def get_host_status_by_host(self, host):
        return host['status']

    # check all the properties before link or clear template
    def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
                             exist_interfaces, host):
        # True when any of groups / status / interfaces / templates differ
        # from the current state, i.e. an update is needed.
        # get the existing host's groups
        exist_host_groups = self.get_host_groups_by_host_id(host_id)
        if set(host_groups) != set(exist_host_groups):
            return True

        # get the existing status
        exist_status = self.get_host_status_by_host(host)
        if int(status) != int(exist_status):
            return True

        # check the exist_interfaces whether it equals the interfaces or not
        if self.check_interface_properties(exist_interfaces, interfaces):
            return True

        # get the existing templates
        exist_template_ids = self.get_host_templates_by_host_id(host_id)
        if set(list(template_ids)) != set(exist_template_ids):
            return True

        return False

    # link or clear template of the host
    def link_or_clear_template(self, host_id, template_id_list):
        # Make the host's linked templates exactly template_id_list:
        # templates not in the desired set are unlinked AND cleared.
        # get host's exist template ids
        exist_template_id_list = self.get_host_templates_by_host_id(host_id)

        exist_template_ids = set(exist_template_id_list)
        template_ids = set(template_id_list)
        template_id_list = list(template_ids)

        # get unlink and clear templates
        templates_clear = exist_template_ids.difference(template_ids)
        templates_clear_list = list(templates_clear)
        request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list}
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception, e:
            self._module.fail_json(msg="Failed to link template to host: %s" % e)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Ansible entry point for zabbix_host: creates, updates or deletes a
    Zabbix host along with its groups, templates and interfaces."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(required=True, aliases=['url']),
            login_user=dict(required=True),
            login_password=dict(required=True),
            host_name=dict(required=True),
            host_groups=dict(required=False),
            link_templates=dict(required=False),
            status=dict(default="enabled"),
            state=dict(default="present"),
            timeout=dict(default=10),
            interfaces=dict(required=False)
        ),
        supports_check_mode=True
    )

    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    host_name = module.params['host_name']
    host_groups = module.params['host_groups']
    link_templates = module.params['link_templates']
    status = module.params['status']
    state = module.params['state']
    timeout = module.params['timeout']
    interfaces = module.params['interfaces']

    # convert enabled to 0; disabled to 1
    status = 1 if status == "disabled" else 0

    zbx = None
    # login to zabbix
    try:
        zbx = ZabbixAPIExtends(server_url, timeout=timeout)
        zbx.login(login_user, login_password)
    except Exception, e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    host = Host(module, zbx)

    # Resolve template names to ids up front (fails on unknown names).
    template_ids = []
    if link_templates:
        template_ids = host.get_template_ids(link_templates)

    group_ids = []

    if host_groups:
        group_ids = host.get_group_ids_by_group_names(host_groups)

    # Remember the agent interface IP (type 1) for the result message.
    ip = ""
    if interfaces:
        for interface in interfaces:
            if interface['type'] == 1:
                ip = interface['ip']

    # check if host exist
    is_host_exist = host.is_host_exist(host_name)

    if is_host_exist:
        # get host id by host name
        zabbix_host_obj = host.get_host_by_host_name(host_name)
        host_id = zabbix_host_obj['hostid']

        if state == "absent":
            # remove host
            host.delete_host(host_id, host_name)
            module.exit_json(changed=True, result="Successfully delete host %s" % host_name)
        else:
            if not group_ids:
                module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name)

            # get exist host's interfaces
            exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
            # Deep copy because update_host mutates the list it is given.
            exist_interfaces_copy = copy.deepcopy(exist_interfaces)

            # update host
            interfaces_len = len(interfaces) if interfaces else 0

            # The two branches differ only in operation order and which
            # interface snapshot is compared against.
            if len(exist_interfaces) > interfaces_len:
                if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
                                             exist_interfaces, zabbix_host_obj):
                    host.link_or_clear_template(host_id, template_ids)
                    host.update_host(host_name, group_ids, status, host_id,
                                     interfaces, exist_interfaces)
                    module.exit_json(changed=True,
                                     result="Successfully update host %s (%s) and linked with template '%s'"
                                            % (host_name, ip, link_templates))
                else:
                    module.exit_json(changed=False)
            else:
                if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
                                             exist_interfaces_copy, zabbix_host_obj):
                    host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces)
                    host.link_or_clear_template(host_id, template_ids)
                    module.exit_json(changed=True,
                                     result="Successfully update host %s (%s) and linked with template '%s'"
                                            % (host_name, ip, link_templates))
                else:
                    module.exit_json(changed=False)
    else:
        if not group_ids:
            module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)

        if not interfaces or (interfaces and len(interfaces) == 0):
            module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)

        # create host
        host_id = host.add_host(host_name, group_ids, status, interfaces)
        host.link_or_clear_template(host_id, template_ids)
        module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
            host_name, ip, link_templates))

from ansible.module_utils.basic import *
main()
|
||||||
|
|
@ -0,0 +1,225 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# (c) 2013-2014, Epic Games, Inc.
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
#
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: zabbix_hostmacro
|
||||||
|
short_description: Zabbix host macro creates/updates/deletes
|
||||||
|
description:
|
||||||
|
- manages Zabbix host macros, it can create, update or delete them.
|
||||||
|
version_added: "2.0"
|
||||||
|
author: Dean Hailin Song
|
||||||
|
requirements:
|
||||||
|
- zabbix-api python module
|
||||||
|
options:
|
||||||
|
server_url:
|
||||||
|
description:
|
||||||
|
- Url of Zabbix server, with protocol (http or https).
|
||||||
|
required: true
|
||||||
|
aliases: [ "url" ]
|
||||||
|
login_user:
|
||||||
|
description:
|
||||||
|
- Zabbix user name.
|
||||||
|
required: true
|
||||||
|
login_password:
|
||||||
|
description:
|
||||||
|
- Zabbix user password.
|
||||||
|
required: true
|
||||||
|
host_name:
|
||||||
|
description:
|
||||||
|
- Name of the host.
|
||||||
|
required: true
|
||||||
|
macro_name:
|
||||||
|
description:
|
||||||
|
- Name of the host macro.
|
||||||
|
required: true
|
||||||
|
macro_value:
|
||||||
|
description:
|
||||||
|
- Value of the host macro.
|
||||||
|
required: true
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- 'Possible values are: "present" and "absent". If the macro already exists, and the state is "present", it will just update the macro if needed.'
|
||||||
|
required: false
|
||||||
|
default: "present"
|
||||||
|
timeout:
|
||||||
|
description:
|
||||||
|
- The timeout of API request(seconds).
|
||||||
|
default: 10
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
- name: Create a new host macro or update an existing macro's value
|
||||||
|
local_action:
|
||||||
|
module: zabbix_hostmacro
|
||||||
|
server_url: http://monitor.example.com
|
||||||
|
login_user: username
|
||||||
|
login_password: password
|
||||||
|
host_name: ExampleHost
|
||||||
|
macro_name: Example macro
macro_value: Example value
|
||||||
|
state: present
|
||||||
|
'''
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import copy
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
|
||||||
|
try:
|
||||||
|
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
|
||||||
|
|
||||||
|
HAS_ZABBIX_API = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_ZABBIX_API = False
|
||||||
|
|
||||||
|
|
||||||
|
# Extend the ZabbixAPI
# Since the zabbix-api python module too old (version 1.0, no higher version so far).
class ZabbixAPIExtends(ZabbixAPI):
    # Thin shim over the third-party ZabbixAPI client so the constructor
    # signature matches the other Zabbix modules in this file.
    def __init__(self, server, timeout, **kwargs):
        # Delegate straight to the base class; extra **kwargs are accepted
        # for call-site compatibility but deliberately not forwarded.
        ZabbixAPI.__init__(self, server, timeout=timeout)
|
||||||
|
|
||||||
|
|
||||||
|
class HostMacro(object):
    """Wraps the Zabbix API calls needed to manage a single host macro.

    Every mutating method reports its outcome directly through the
    AnsibleModule (exit_json/fail_json), so callers should treat those
    calls as terminal.
    """

    def __init__(self, module, zbx):
        self._module = module  # AnsibleModule, used for exit/fail reporting
        self._zapi = zbx       # logged-in Zabbix API connection

    # check whether a host with this name exists
    def is_host_exist(self, host_name):
        result = self._zapi.host.exists({'host': host_name})
        return result

    # get host id by host name; fails the module when the host is missing
    def get_host_id(self, host_name):
        try:
            host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': host_name}})
            if len(host_list) < 1:
                self._module.fail_json(msg="Host not found: %s" % host_name)
            else:
                host_id = host_list[0]['hostid']
                return host_id
        except Exception as e:
            self._module.fail_json(msg="Failed to get the host %s id: %s." % (host_name, e))

    # get host macro object, or None when the macro does not exist
    def get_host_macro(self, macro_name, host_id):
        try:
            # Zabbix stores user macros in '{$NAME}' form, so wrap the name.
            host_macro_list = self._zapi.usermacro.get(
                {"output": "extend", "selectSteps": "extend", 'hostids': [host_id], 'filter': {'macro': '{$' + macro_name + '}'}})
            if len(host_macro_list) > 0:
                return host_macro_list[0]
            return None
        except Exception as e:
            self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e))

    # create host macro (terminal: exits the module on success)
    def create_host_macro(self, macro_name, macro_value, host_id):
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.usermacro.create({'hostid': host_id, 'macro': '{$' + macro_name + '}', 'value': macro_value})
            self._module.exit_json(changed=True, result="Successfully added host macro %s " % macro_name)
        except Exception as e:
            self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e))

    # update host macro value (terminal: exits the module on success)
    def update_host_macro(self, host_macro_obj, macro_name, macro_value):
        host_macro_id = host_macro_obj['hostmacroid']
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value})
            self._module.exit_json(changed=True, result="Successfully updated host macro %s " % macro_name)
        except Exception as e:
            # fixed error text: was "Failed to updated host macro"
            self._module.fail_json(msg="Failed to update host macro %s: %s" % (macro_name, e))

    # delete host macro (terminal: exits the module on success)
    def delete_host_macro(self, host_macro_obj, macro_name):
        host_macro_id = host_macro_obj['hostmacroid']
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.usermacro.delete([host_macro_id])
            self._module.exit_json(changed=True, result="Successfully deleted host macro %s " % macro_name)
        except Exception as e:
            self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e))
|
||||||
|
|
||||||
|
def main():
    """Entry point: create, update or delete one Zabbix host macro."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(required=True, aliases=['url']),
            login_user=dict(required=True),
            login_password=dict(required=True),
            host_name=dict(required=True),
            macro_name=dict(required=True),
            macro_value=dict(required=True),
            state=dict(default="present"),
            # type='int' so a string value from the playbook is converted
            # before being passed to the Zabbix API as a timeout
            timeout=dict(type='int', default=10)
        ),
        supports_check_mode=True
    )

    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    host_name = module.params['host_name']
    # normalize to upper case before the '{$NAME}' lookup/creation
    macro_name = (module.params['macro_name']).upper()
    macro_value = module.params['macro_value']
    state = module.params['state']
    timeout = module.params['timeout']

    zbx = None
    # login to zabbix
    try:
        zbx = ZabbixAPIExtends(server_url, timeout=timeout)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    host_macro_class_obj = HostMacro(module, zbx)

    changed = False

    if host_name:
        host_id = host_macro_class_obj.get_host_id(host_name)
        host_macro_obj = host_macro_class_obj.get_host_macro(macro_name, host_id)

    if state == 'absent':
        if not host_macro_obj:
            module.exit_json(changed=False, msg="Host Macro %s does not exist" % macro_name)
        else:
            # delete a macro
            host_macro_class_obj.delete_host_macro(host_macro_obj, macro_name)
    else:
        if not host_macro_obj:
            # create host macro
            host_macro_class_obj.create_host_macro(macro_name, macro_value, host_id)
        else:
            # update host macro
            host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value)
|
||||||
|
|
||||||
|
# import module snippets (Ansible replaces this wildcard import at runtime)
from ansible.module_utils.basic import *
main()
|
||||||
|
|
@ -0,0 +1,415 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# (c) 2013-2014, Epic Games, Inc.
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: zabbix_screen
|
||||||
|
short_description: Zabbix screen creates/updates/deletes
|
||||||
|
description:
|
||||||
|
- This module allows you to create, modify and delete Zabbix screens and associated graph data.
|
||||||
|
version_added: "2.0"
|
||||||
|
author: Tony Minfei Ding, Harrison Gu
|
||||||
|
requirements:
|
||||||
|
- zabbix-api python module
|
||||||
|
options:
|
||||||
|
server_url:
|
||||||
|
description:
|
||||||
|
- Url of Zabbix server, with protocol (http or https).
|
||||||
|
required: true
|
||||||
|
aliases: [ "url" ]
|
||||||
|
login_user:
|
||||||
|
description:
|
||||||
|
- Zabbix user name.
|
||||||
|
required: true
|
||||||
|
login_password:
|
||||||
|
description:
|
||||||
|
- Zabbix user password.
|
||||||
|
required: true
|
||||||
|
timeout:
|
||||||
|
description:
|
||||||
|
- The timeout of API request(seconds).
|
||||||
|
default: 10
|
||||||
|
zabbix_screens:
|
||||||
|
description:
|
||||||
|
- List of screens to be created/updated/deleted(see example).
|
||||||
|
- If the screen(s) already been added, the screen(s) name won't be updated.
|
||||||
|
- When creating or updating screen(s), the screen_name, host_group are required.
|
||||||
|
- When deleting screen(s), the screen_name is required.
|
||||||
|
- 'The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated as needed.'
|
||||||
|
required: true
|
||||||
|
notes:
|
||||||
|
- Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed.
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Create/update a screen.
|
||||||
|
- name: Create a new screen or update an existing screen's items
|
||||||
|
local_action:
|
||||||
|
module: zabbix_screen
|
||||||
|
server_url: http://monitor.example.com
|
||||||
|
login_user: username
|
||||||
|
login_password: password
|
||||||
|
screens:
|
||||||
|
- screen_name: ExampleScreen1
|
||||||
|
host_group: Example group1
|
||||||
|
state: present
|
||||||
|
graph_names:
|
||||||
|
- Example graph1
|
||||||
|
- Example graph2
|
||||||
|
graph_width: 200
|
||||||
|
graph_height: 100
|
||||||
|
|
||||||
|
# Create/update multi-screen
|
||||||
|
- name: Create two of new screens or update the existing screens' items
|
||||||
|
local_action:
|
||||||
|
module: zabbix_screen
|
||||||
|
server_url: http://monitor.example.com
|
||||||
|
login_user: username
|
||||||
|
login_password: password
|
||||||
|
screens:
|
||||||
|
- screen_name: ExampleScreen1
|
||||||
|
host_group: Example group1
|
||||||
|
state: present
|
||||||
|
graph_names:
|
||||||
|
- Example graph1
|
||||||
|
- Example graph2
|
||||||
|
graph_width: 200
|
||||||
|
graph_height: 100
|
||||||
|
- screen_name: ExampleScreen2
|
||||||
|
host_group: Example group2
|
||||||
|
state: present
|
||||||
|
graph_names:
|
||||||
|
- Example graph1
|
||||||
|
- Example graph2
|
||||||
|
graph_width: 200
|
||||||
|
graph_height: 100
|
||||||
|
|
||||||
|
# Limit the Zabbix screen creations to one host since Zabbix can return an error when doing concurent updates
|
||||||
|
- name: Create a new screen or update an existing screen's items
|
||||||
|
local_action:
|
||||||
|
module: zabbix_screen
|
||||||
|
server_url: http://monitor.example.com
|
||||||
|
login_user: username
|
||||||
|
login_password: password
|
||||||
|
state: present
|
||||||
|
screens:
|
||||||
|
- screen_name: ExampleScreen
|
||||||
|
host_group: Example group
|
||||||
|
state: present
|
||||||
|
graph_names:
|
||||||
|
- Example graph1
|
||||||
|
- Example graph2
|
||||||
|
graph_width: 200
|
||||||
|
graph_height: 100
|
||||||
|
when: inventory_hostname==groups['group_name'][0]
|
||||||
|
'''
|
||||||
|
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
|
||||||
|
try:
|
||||||
|
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
|
||||||
|
from zabbix_api import ZabbixAPIException
|
||||||
|
from zabbix_api import Already_Exists
|
||||||
|
HAS_ZABBIX_API = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_ZABBIX_API = False
|
||||||
|
|
||||||
|
|
||||||
|
# Extend the ZabbixAPI
# Since the zabbix-api python module too old (version 1.0, and there's no higher version so far), it doesn't support the 'screenitem' api call,
# we have to inherit the ZabbixAPI class to add 'screenitem' support.
class ZabbixAPIExtends(ZabbixAPI):
    # sub-client for the 'screenitem' API family; populated in __init__
    screenitem = None

    def __init__(self, server, timeout, **kwargs):
        ZabbixAPI.__init__(self, server, timeout=timeout)
        # Build a sub-client whose calls are prefixed with 'screenitem';
        # extra **kwargs are merged into the sub-client configuration.
        self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs))
|
||||||
|
|
||||||
|
|
||||||
|
class Screen(object):
    """Wraps the Zabbix API calls needed to build and maintain screens.

    Failure paths report through the AnsibleModule (fail_json/exit_json),
    so those calls are terminal for the module run.
    """

    def __init__(self, module, zbx):
        self._module = module  # AnsibleModule, used for exit/fail reporting
        self._zapi = zbx       # logged-in Zabbix API connection

    # get group id by group name; fails the module when missing
    def get_host_group_id(self, group_name):
        if group_name == "":
            self._module.fail_json(msg="group_name is required")
        hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}})
        if len(hostGroup_list) < 1:
            self._module.fail_json(msg="Host group not found: %s" % group_name)
        else:
            hostGroup_id = hostGroup_list[0]['groupid']
            return hostGroup_id

    # get monitored host ids belonging to a host group
    def get_host_ids_by_group_id(self, group_id):
        host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1})
        if len(host_list) < 1:
            self._module.fail_json(msg="No host in the group.")
        else:
            host_ids = []
            for i in host_list:
                host_id = i['hostid']
                host_ids.append(host_id)
            return host_ids

    # get screen id by name, or None when the screen does not exist
    def get_screen_id(self, screen_name):
        if screen_name == "":
            self._module.fail_json(msg="screen_name is required")
        try:
            screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}})
            if len(screen_id_list) >= 1:
                screen_id = screen_id_list[0]['screenid']
                return screen_id
            return None
        except Exception as e:
            self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e))

    # create a screen and return its new id
    def create_screen(self, screen_name, h_size, v_size):
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size})
            return screen['screenids'][0]
        except Exception as e:
            self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e))

    # update an existing screen's dimensions
    def update_screen(self, screen_id, screen_name, h_size, v_size):
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size})
        except Exception as e:
            self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e))

    # delete screen
    def delete_screen(self, screen_id, screen_name):
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.screen.delete([screen_id])
        except Exception as e:
            self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e))

    # collect graph ids across all hosts; also returns the largest per-host
    # graph count, used later as the screen's initial vertical size
    def get_graph_ids(self, hosts, graph_name_list):
        graph_id_lists = []
        vsize = 1
        for host in hosts:
            graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
            size = len(graph_id_list)
            if size > 0:
                graph_id_lists.extend(graph_id_list)
                if vsize < size:
                    vsize = size
        return graph_id_lists, vsize

    # get graph ids matching the given names for one host
    def get_graphs_by_host_id(self, graph_name_list, host_id):
        graph_ids = []
        for graph_name in graph_name_list:
            graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id})
            graph_id_list = []
            if len(graphs_list) > 0:
                for graph in graphs_list:
                    graph_id = graph['graphid']
                    graph_id_list.append(graph_id)
                if len(graph_id_list) > 0:
                    graph_ids.extend(graph_id_list)
        return graph_ids

    # get screen items
    def get_screen_items(self, screen_id):
        screen_item_list = self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id})
        return screen_item_list

    # delete screen items; returns True when the delete was issued (or there
    # was nothing to delete), False when the screen has no items
    def delete_screen_items(self, screen_id, screen_item_id_list):
        try:
            if len(screen_item_id_list) == 0:
                return True
            screen_item_list = self.get_screen_items(screen_id)
            if len(screen_item_list) > 0:
                if self._module.check_mode:
                    self._module.exit_json(changed=True)
                self._zapi.screenitem.delete(screen_item_id_list)
                return True
            return False
        except ZabbixAPIException:
            # deliberately best-effort: an API failure falls through to an
            # implicit None return, which callers treat as "nothing deleted"
            pass

    # get screen's hsize and vsize
    def get_hsize_vsize(self, hosts, v_size):
        h_size = len(hosts)
        if h_size == 1:
            # a single host should not produce a single long column; widen
            # the screen based on how many graphs it has
            if v_size == 1:
                h_size = 1
            elif v_size in range(2, 9):
                h_size = 2
            else:
                h_size = 3
            # floor division: was '/', which yields a float under Python 3
            # and would send a non-integer size to the Zabbix API
            v_size = (v_size - 1) // h_size + 1
        return h_size, v_size

    # create screen items, laying graphs out on an h_size-wide grid
    def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size):
        # default item size: larger cells when only a few hosts are shown
        if len(hosts) < 4:
            if width is None or width < 0:
                width = 500
        else:
            if width is None or width < 0:
                width = 200
        if height is None or height < 0:
            height = 100

        try:
            # when there're only one host, only one row is not good.
            if len(hosts) == 1:
                graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0])
                for i, graph_id in enumerate(graph_id_list):
                    if graph_id is not None:
                        # floor division for 'y': was '/', float under Python 3
                        self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
                                                      'width': width, 'height': height,
                                                      'x': i % h_size, 'y': i // h_size, 'colspan': 1, 'rowspan': 1,
                                                      'elements': 0, 'valign': 0, 'halign': 0,
                                                      'style': 0, 'dynamic': 0, 'sort_triggers': 0})
            else:
                # one column per host, one row per graph
                for i, host in enumerate(hosts):
                    graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
                    for j, graph_id in enumerate(graph_id_list):
                        if graph_id is not None:
                            self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
                                                          'width': width, 'height': height,
                                                          'x': i, 'y': j, 'colspan': 1, 'rowspan': 1,
                                                          'elements': 0, 'valign': 0, 'halign': 0,
                                                          'style': 0, 'dynamic': 0, 'sort_triggers': 0})
        except Already_Exists:
            # items already present on the screen are fine (idempotent create)
            pass
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Entry point: create/update/delete the screens described in 'screens'."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(required=True, aliases=['url']),
            login_user=dict(required=True),
            login_password=dict(required=True),
            # type='int' so a string value from the playbook is converted
            timeout=dict(type='int', default=10),
            # the loop below iterates this as a list of dicts
            screens=dict(type='list', required=True)
        ),
        supports_check_mode=True
    )

    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    timeout = module.params['timeout']
    screens = module.params['screens']

    zbx = None
    # login to zabbix
    try:
        zbx = ZabbixAPIExtends(server_url, timeout=timeout)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    screen = Screen(module, zbx)
    created_screens = []
    changed_screens = []
    deleted_screens = []

    for zabbix_screen in screens:
        screen_name = zabbix_screen['screen_name']
        screen_id = screen.get_screen_id(screen_name)
        state = "absent" if "state" in zabbix_screen and zabbix_screen['state'] == "absent" else "present"

        if state == "absent":
            if screen_id:
                # remove items first, then the screen itself
                screen_item_list = screen.get_screen_items(screen_id)
                screen_item_id_list = []
                for screen_item in screen_item_list:
                    screen_item_id = screen_item['screenitemid']
                    screen_item_id_list.append(screen_item_id)
                screen.delete_screen_items(screen_id, screen_item_id_list)
                screen.delete_screen(screen_id, screen_name)

                deleted_screens.append(screen_name)
        else:
            host_group = zabbix_screen['host_group']
            graph_names = zabbix_screen['graph_names']
            graph_width = None
            if 'graph_width' in zabbix_screen:
                graph_width = zabbix_screen['graph_width']
            graph_height = None
            if 'graph_height' in zabbix_screen:
                graph_height = zabbix_screen['graph_height']
            host_group_id = screen.get_host_group_id(host_group)
            hosts = screen.get_host_ids_by_group_id(host_group_id)

            screen_item_id_list = []
            resource_id_list = []

            graph_ids, v_size = screen.get_graph_ids(hosts, graph_names)
            h_size, v_size = screen.get_hsize_vsize(hosts, v_size)

            if not screen_id:
                # create screen
                screen_id = screen.create_screen(screen_name, h_size, v_size)
                screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
                created_screens.append(screen_name)
            else:
                screen_item_list = screen.get_screen_items(screen_id)

                for screen_item in screen_item_list:
                    screen_item_id = screen_item['screenitemid']
                    resource_id = screen_item['resourceid']
                    screen_item_id_list.append(screen_item_id)
                    resource_id_list.append(resource_id)

                # when the screen items changed, then update
                if graph_ids != resource_id_list:
                    deleted = screen.delete_screen_items(screen_id, screen_item_id_list)
                    if deleted:
                        screen.update_screen(screen_id, screen_name, h_size, v_size)
                        screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
                        changed_screens.append(screen_name)

    if created_screens and changed_screens:
        module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens), ",".join(changed_screens)))
    elif created_screens:
        module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens))
    elif changed_screens:
        module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens))
    elif deleted_screens:
        module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens))
    else:
        module.exit_json(changed=False)

# <<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
|
@ -0,0 +1,106 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright (c) 2012, Jim Richardson <weaselkeeper@gmail.com>
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
###
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: pushover
|
||||||
|
version_added: "2.0"
|
||||||
|
short_description: Send notifications via u(https://pushover.net)
|
||||||
|
description:
|
||||||
|
- Send notifications via pushover, to subscriber list of devices, and email
|
||||||
|
addresses. Requires pushover app on devices.
|
||||||
|
notes:
|
||||||
|
- You will require a pushover.net account to use this module. But no account
|
||||||
|
is required to receive messages.
|
||||||
|
options:
|
||||||
|
msg:
|
||||||
|
description:
|
||||||
|
What message you wish to send.
|
||||||
|
required: true
|
||||||
|
app_token:
|
||||||
|
description:
|
||||||
|
Pushover issued token identifying your pushover app.
|
||||||
|
required: true
|
||||||
|
user_key:
|
||||||
|
description:
|
||||||
|
Pushover issued authentication key for your user.
|
||||||
|
required: true
|
||||||
|
pri:
|
||||||
|
description: Message priority (see u(https://pushover.net) for details.)
|
||||||
|
required: false
|
||||||
|
|
||||||
|
author: Jim Richardson
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
- local_action: pushover msg="{{inventory_hostname}} has exploded in flames,
|
||||||
|
It is now time to panic" app_token=wxfdksl user_key=baa5fe97f2c5ab3ca8f0bb59
|
||||||
|
'''
|
||||||
|
|
||||||
|
import urllib
|
||||||
|
import httplib
|
||||||
|
|
||||||
|
|
||||||
|
class pushover(object):
    ''' Instantiates a pushover object, use it to send notifications '''

    def __init__(self):
        # Pushover REST endpoint; port 443 => HTTPS only
        self.host, self.port = 'api.pushover.net', 443

    def run(self):
        ''' Do, whatever it is, we do. '''
        # parse config
        # POST the caller-populated ``self.options`` dict (user, token,
        # priority, message) to the messages endpoint, form-encoded.
        # NOTE(review): the response status is read but discarded, so API
        # rejections are silently ignored — confirm whether that is intended.
        conn = httplib.HTTPSConnection(self.host, self.port)
        conn.request("POST", "/1/messages.json",
                urllib.urlencode(self.options),
                {"Content-type": "application/x-www-form-urlencoded"})
        conn.getresponse()
        return
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Entry point: send one Pushover notification from module params."""

    module = AnsibleModule(
        argument_spec=dict(
            msg=dict(required=True),
            app_token=dict(required=True),
            user_key=dict(required=True),
            pri=dict(required=False, default=0),
        ),
    )

    msg_object = pushover()
    msg_object.options = {}
    msg_object.options['user'] = module.params['user_key']
    msg_object.options['token'] = module.params['app_token']
    msg_object.options['priority'] = module.params['pri']
    msg_object.options['message'] = module.params['msg']
    try:
        msg_object.run()
    except Exception:
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are not swallowed
        module.fail_json(msg='Unable to send msg via pushover')

    # bug fix: 'msg' was an undefined name here, raising NameError on the
    # success path; report the message that was sent
    module.exit_json(msg=module.params['msg'], changed=False)
|
||||||
|
|
||||||
|
# import module snippets
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
main()
|
@ -0,0 +1,148 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# (c) 2015, Matt Makai <matthew.makai@gmail.com>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
version_added: "2.0"
|
||||||
|
module: sendgrid
|
||||||
|
short_description: Sends an email with the SendGrid API
|
||||||
|
description:
|
||||||
|
- Sends an email with a SendGrid account through their API, not through
|
||||||
|
the SMTP service.
|
||||||
|
notes:
|
||||||
|
- This module is non-idempotent because it sends an email through the
|
||||||
|
external API. It is idempotent only in the case that the module fails.
|
||||||
|
- Like the other notification modules, this one requires an external
|
||||||
|
dependency to work. In this case, you'll need an active SendGrid
|
||||||
|
account.
|
||||||
|
options:
|
||||||
|
username:
|
||||||
|
description:
|
||||||
|
username for logging into the SendGrid account
|
||||||
|
required: true
|
||||||
|
password:
|
||||||
|
description: password that corresponds to the username
|
||||||
|
required: true
|
||||||
|
from_address:
|
||||||
|
description:
|
||||||
|
the address in the "from" field for the email
|
||||||
|
required: true
|
||||||
|
to_addresses:
|
||||||
|
description:
|
||||||
|
a list with one or more recipient email addresses
|
||||||
|
required: true
|
||||||
|
subject:
|
||||||
|
description:
|
||||||
|
the desired subject for the email
|
||||||
|
required: true
|
||||||
|
|
||||||
|
author: Matt Makai
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
# send an email to a single recipient that the deployment was successful
- sendgrid:
    username: "{{ sendgrid_username }}"
    password: "{{ sendgrid_password }}"
    from_address: "ansible@mycompany.com"
    to_addresses:
      - "ops@mycompany.com"
    subject: "Deployment success."
    body: "The most recent Ansible deployment was successful."
  delegate_to: localhost

# send an email to more than one recipient that the build failed
- sendgrid:
    username: "{{ sendgrid_username }}"
    password: "{{ sendgrid_password }}"
    from_address: "build@mycompany.com"
    to_addresses:
      - "ops@mycompany.com"
      - "devteam@mycompany.com"
    subject: "Build failure!."
    body: "Unable to pull source repository from Git server."
  delegate_to: localhost
'''
|
||||||
|
|
||||||
|
# =======================================
|
||||||
|
# sendgrid module support methods
|
||||||
|
#
|
||||||
|
try:
    import urllib, urllib2
except ImportError:
    # NOTE(review): 'module' is not defined at import time, so this call
    # would itself raise NameError if urllib/urllib2 were ever missing.
    # Consider setting a HAS_URLLIB flag here and failing inside main().
    module.fail_json(msg="urllib and urllib2 are required")
|
||||||
|
|
||||||
|
import base64
|
||||||
|
|
||||||
|
def post_sendgrid_api(module, username, password, from_address, to_addresses,
        subject, body):
    """POST one message to the SendGrid mail.send endpoint.

    Returns the urllib2 response object; raises on transport/HTTP errors.
    """
    endpoint = "https://api.sendgrid.com/api/mail.send.json"
    # credentials plus the fixed message fields, form-encoded
    payload = urllib.urlencode({'api_user': username, 'api_key': password,
                                'from': from_address, 'subject': subject, 'text': body})
    # each recipient becomes an extra '&to[]=' form field
    recipient_fields = []
    for addr in to_addresses:
        if isinstance(addr, unicode):
            addr = addr.encode('utf-8')
        recipient_fields.append('&to[]=%s' % addr)
    payload += ''.join(recipient_fields)
    request = urllib2.Request(endpoint)
    request.add_header('User-Agent', 'Ansible')
    request.add_header('Content-type', 'application/x-www-form-urlencoded')
    request.add_header('Accept', 'application/json')
    return urllib2.urlopen(request, payload)
|
||||||
|
|
||||||
|
|
||||||
|
# =======================================
|
||||||
|
# Main
|
||||||
|
#
|
||||||
|
|
||||||
|
def main():
    """Ansible entry point: send a plain-text email via the SendGrid API."""
    module = AnsibleModule(
        argument_spec=dict(
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            from_address=dict(required=True),
            to_addresses=dict(required=True, type='list'),
            subject=dict(required=True),
            body=dict(required=True),
        ),
        supports_check_mode=True
    )

    username = module.params['username']
    password = module.params['password']
    from_address = module.params['from_address']
    to_addresses = module.params['to_addresses']
    subject = module.params['subject']
    body = module.params['body']

    # The module declares check-mode support, so honor it: report success
    # without actually calling the SendGrid API. (Previously the mail was
    # sent even in check mode.)
    if module.check_mode:
        module.exit_json(msg=subject, changed=False)

    try:
        response = post_sendgrid_api(module, username, password,
                                     from_address, to_addresses, subject, body)
    except Exception as e:
        # Include the underlying error so failures are diagnosable.
        module.fail_json(msg="unable to send email through SendGrid API: %s" % e)

    module.exit_json(msg=subject, changed=False)
|
||||||
|
|
||||||
|
# import module snippets
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
main()
|
@ -0,0 +1,363 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com>
|
||||||
|
#
|
||||||
|
# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
|
||||||
|
# as a reference and starting point.
|
||||||
|
#
|
||||||
|
# This module is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This software is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this software. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
__author__ = 'cschmidt'
|
||||||
|
|
||||||
|
from lxml import etree
|
||||||
|
from urllib2 import Request, urlopen, URLError, HTTPError
|
||||||
|
import os
|
||||||
|
import hashlib
|
||||||
|
import sys
|
||||||
|
import base64
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: maven_artifact
|
||||||
|
short_description: Downloads an Artifact from a Maven Repository
|
||||||
|
version_added: "2.0"
|
||||||
|
description:
|
||||||
|
- Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve
|
||||||
|
- snapshots or release versions of the artifact and will resolve the latest available version if one is not
|
||||||
|
- available.
|
||||||
|
author: Chris Schmidt <chris.schmidt () contrastsecurity.com>
|
||||||
|
requirements:
|
||||||
|
- python libxml
|
||||||
|
- python urllib2
|
||||||
|
options:
|
||||||
|
group_id:
|
||||||
|
description: The Maven groupId coordinate
|
||||||
|
required: true
|
||||||
|
artifact_id:
|
||||||
|
description: The maven artifactId coordinate
|
||||||
|
required: true
|
||||||
|
version:
|
||||||
|
description: The maven version coordinate
|
||||||
|
required: false
|
||||||
|
default: latest
|
||||||
|
classifier:
|
||||||
|
description: The maven classifier coordinate
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
extension:
|
||||||
|
description: The maven type/extension coordinate
|
||||||
|
required: false
|
||||||
|
default: jar
|
||||||
|
repository_url:
|
||||||
|
description: The URL of the Maven Repository to download from
|
||||||
|
required: false
|
||||||
|
default: http://repo1.maven.org/maven2
|
||||||
|
username:
|
||||||
|
description: The username to authenticate as to the Maven Repository
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
password:
|
||||||
|
description: The password to authenticate with to the Maven Repository
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
dest:
|
||||||
|
description: The path where the artifact should be written to
|
||||||
|
required: true
|
||||||
|
default: null
|
||||||
|
state:
|
||||||
|
description: The desired state of the artifact
|
||||||
|
required: true
|
||||||
|
default: present
|
||||||
|
choices: [present,absent]
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Download the latest version of the commons-collections artifact from Maven Central
|
||||||
|
- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections dest=/tmp/commons-collections-latest.jar
|
||||||
|
|
||||||
|
# Download Apache Commons-Collections 3.2 from Maven Central
|
||||||
|
- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 dest=/tmp/commons-collections-3.2.jar
|
||||||
|
|
||||||
|
# Download an artifact from a private repository requiring authentication
|
||||||
|
- maven_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass dest=/tmp/library-name-latest.jar
|
||||||
|
|
||||||
|
# Download a WAR File to the Tomcat webapps directory to be deployed
|
||||||
|
- maven_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven dest=/var/lib/tomcat7/webapps/web-app.war
|
||||||
|
'''
|
||||||
|
|
||||||
|
class Artifact(object):
    """Maven artifact coordinates: group, artifact, version, classifier,
    extension. Provides repository-path and filename helpers plus parsing
    of colon-separated coordinate strings."""

    def __init__(self, group_id, artifact_id, version, classifier=None, extension='jar'):
        if not group_id:
            raise ValueError("group_id must be set")
        if not artifact_id:
            raise ValueError("artifact_id must be set")

        self.group_id = group_id
        self.artifact_id = artifact_id
        self.version = version
        self.classifier = classifier
        # Fall back to the default Maven packaging when none was given.
        self.extension = extension if extension else "jar"

    def is_snapshot(self):
        # Truthy only for versions like "1.0-SNAPSHOT".
        return self.version and self.version.endswith("SNAPSHOT")

    def path(self, with_version=True):
        """Repository-relative directory for this artifact."""
        segments = [self.group_id.replace(".", "/"), self.artifact_id]
        if with_version and self.version:
            segments.append(self.version)
        return "/".join(segments)

    def _generate_filename(self):
        # Classifier, when present, is appended before the extension.
        if self.classifier:
            return self.artifact_id + "-" + self.classifier + "." + self.extension
        return self.artifact_id + "." + self.extension

    def get_filename(self, filename=None):
        """Resolve the local target filename; a directory argument gets the
        generated artifact filename appended."""
        if not filename:
            return self._generate_filename()
        if os.path.isdir(filename):
            return os.path.join(filename, self._generate_filename())
        return filename

    def __str__(self):
        # Render in standard Maven coordinate notation, omitting default parts.
        if self.classifier:
            return "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
        if self.extension != "jar":
            return "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
        return "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)

    @staticmethod
    def parse(input):
        """Parse 'group:artifact[:extension[:classifier]]:version'; returns
        None when fewer than three parts are present."""
        parts = input.split(":")
        if len(parts) < 3:
            return None
        g = parts[0]
        a = parts[1]
        v = parts[-1]
        t = None
        c = None
        if len(parts) == 4:
            t = parts[2]
        elif len(parts) == 5:
            t = parts[2]
            c = parts[3]
        return Artifact(g, a, v, c, t)
|
||||||
|
|
||||||
|
|
||||||
|
class MavenDownloader:
|
||||||
|
def __init__(self, base="http://repo1.maven.org/maven2", username=None, password=None):
|
||||||
|
if base.endswith("/"):
|
||||||
|
base = base.rstrip("/")
|
||||||
|
self.base = base
|
||||||
|
self.user_agent = "Maven Artifact Downloader/1.0"
|
||||||
|
self.username = username
|
||||||
|
self.password = password
|
||||||
|
|
||||||
|
def _find_latest_version_available(self, artifact):
|
||||||
|
path = "/%s/maven-metadata.xml" % (artifact.path(False))
|
||||||
|
xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
|
||||||
|
v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
|
||||||
|
if v:
|
||||||
|
return v[0]
|
||||||
|
|
||||||
|
def find_uri_for_artifact(self, artifact):
|
||||||
|
if artifact.is_snapshot():
|
||||||
|
path = "/%s/maven-metadata.xml" % (artifact.path())
|
||||||
|
xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
|
||||||
|
basexpath = "/metadata/versioning/"
|
||||||
|
p = xml.xpath(basexpath + "/snapshotVersions/snapshotVersion")
|
||||||
|
if p:
|
||||||
|
return self._find_matching_artifact(p, artifact)
|
||||||
|
else:
|
||||||
|
return self._uri_for_artifact(artifact)
|
||||||
|
|
||||||
|
def _find_matching_artifact(self, elems, artifact):
|
||||||
|
filtered = filter(lambda e: e.xpath("extension/text() = '%s'" % artifact.extension), elems)
|
||||||
|
if artifact.classifier:
|
||||||
|
filtered = filter(lambda e: e.xpath("classifier/text() = '%s'" % artifact.classifier), elems)
|
||||||
|
|
||||||
|
if len(filtered) > 1:
|
||||||
|
print(
|
||||||
|
"There was more than one match. Selecting the first one. Try adding a classifier to get a better match.")
|
||||||
|
elif not len(filtered):
|
||||||
|
print("There were no matches.")
|
||||||
|
return None
|
||||||
|
|
||||||
|
elem = filtered[0]
|
||||||
|
value = elem.xpath("value/text()")
|
||||||
|
return self._uri_for_artifact(artifact, value[0])
|
||||||
|
|
||||||
|
def _uri_for_artifact(self, artifact, version=None):
|
||||||
|
if artifact.is_snapshot() and not version:
|
||||||
|
raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact))
|
||||||
|
elif not artifact.is_snapshot():
|
||||||
|
version = artifact.version
|
||||||
|
if artifact.classifier:
|
||||||
|
return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension
|
||||||
|
|
||||||
|
return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "." + artifact.extension
|
||||||
|
|
||||||
|
def _request(self, url, failmsg, f):
|
||||||
|
if not self.username:
|
||||||
|
headers = {"User-Agent": self.user_agent}
|
||||||
|
else:
|
||||||
|
headers = {
|
||||||
|
"User-Agent": self.user_agent,
|
||||||
|
"Authorization": "Basic " + base64.b64encode(self.username + ":" + self.password)
|
||||||
|
}
|
||||||
|
req = Request(url, None, headers)
|
||||||
|
try:
|
||||||
|
response = urlopen(req)
|
||||||
|
except HTTPError, e:
|
||||||
|
raise ValueError(failmsg + " because of " + str(e) + "for URL " + url)
|
||||||
|
except URLError, e:
|
||||||
|
raise ValueError(failmsg + " because of " + str(e) + "for URL " + url)
|
||||||
|
else:
|
||||||
|
return f(response)
|
||||||
|
|
||||||
|
|
||||||
|
def download(self, artifact, filename=None):
|
||||||
|
filename = artifact.get_filename(filename)
|
||||||
|
if not artifact.version or artifact.version == "latest":
|
||||||
|
artifact = Artifact(artifact.group_id, artifact.artifact_id, self._find_latest_version_available(artifact),
|
||||||
|
artifact.classifier, artifact.extension)
|
||||||
|
|
||||||
|
url = self.find_uri_for_artifact(artifact)
|
||||||
|
if not self.verify_md5(filename, url + ".md5"):
|
||||||
|
response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r)
|
||||||
|
if response:
|
||||||
|
with open(filename, 'w') as f:
|
||||||
|
# f.write(response.read())
|
||||||
|
self._write_chunks(response, f, report_hook=self.chunk_report)
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
return True
|
||||||
|
|
||||||
|
def chunk_report(self, bytes_so_far, chunk_size, total_size):
|
||||||
|
percent = float(bytes_so_far) / total_size
|
||||||
|
percent = round(percent * 100, 2)
|
||||||
|
sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
|
||||||
|
(bytes_so_far, total_size, percent))
|
||||||
|
|
||||||
|
if bytes_so_far >= total_size:
|
||||||
|
sys.stdout.write('\n')
|
||||||
|
|
||||||
|
def _write_chunks(self, response, file, chunk_size=8192, report_hook=None):
|
||||||
|
total_size = response.info().getheader('Content-Length').strip()
|
||||||
|
total_size = int(total_size)
|
||||||
|
bytes_so_far = 0
|
||||||
|
|
||||||
|
while 1:
|
||||||
|
chunk = response.read(chunk_size)
|
||||||
|
bytes_so_far += len(chunk)
|
||||||
|
|
||||||
|
if not chunk:
|
||||||
|
break
|
||||||
|
|
||||||
|
file.write(chunk)
|
||||||
|
if report_hook:
|
||||||
|
report_hook(bytes_so_far, chunk_size, total_size)
|
||||||
|
|
||||||
|
return bytes_so_far
|
||||||
|
|
||||||
|
def verify_md5(self, file, remote_md5):
|
||||||
|
if not os.path.exists(file):
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
local_md5 = self._local_md5(file)
|
||||||
|
remote = self._request(remote_md5, "Failed to download MD5", lambda r: r.read())
|
||||||
|
return local_md5 == remote
|
||||||
|
|
||||||
|
def _local_md5(self, file):
|
||||||
|
md5 = hashlib.md5()
|
||||||
|
with open(file, 'rb') as f:
|
||||||
|
for chunk in iter(lambda: f.read(8192), ''):
|
||||||
|
md5.update(chunk)
|
||||||
|
return md5.hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Ansible entry point: ensure a Maven artifact is downloaded to *dest*."""
    module = AnsibleModule(
        argument_spec = dict(
            # group_id, artifact_id and dest are documented as required;
            # enforce it so a missing value fails cleanly instead of
            # crashing with a TypeError further down.
            group_id = dict(required=True),
            artifact_id = dict(required=True),
            # Match the documented defaults: version "latest", extension
            # "jar". The previous None defaults crashed the directory-dest
            # filename concatenation below.
            version = dict(default="latest"),
            classifier = dict(default=None),
            extension = dict(default="jar"),
            repository_url = dict(default=None),
            username = dict(default=None),
            password = dict(default=None, no_log=True),
            state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state
            dest = dict(required=True),
        )
    )

    group_id = module.params["group_id"]
    artifact_id = module.params["artifact_id"]
    version = module.params["version"]
    classifier = module.params["classifier"]
    extension = module.params["extension"]
    repository_url = module.params["repository_url"]
    repository_username = module.params["username"]
    repository_password = module.params["password"]
    state = module.params["state"]
    dest = module.params["dest"]

    if not repository_url:
        repository_url = "http://repo1.maven.org/maven2"

    downloader = MavenDownloader(repository_url, repository_username, repository_password)

    try:
        artifact = Artifact(group_id, artifact_id, version, classifier, extension)
    except ValueError as e:
        module.fail_json(msg=e.args[0])

    prev_state = "absent"
    # A directory dest gets the conventional artifact filename appended.
    if os.path.isdir(dest):
        dest = dest + "/" + artifact_id + "-" + version + "." + extension
    if os.path.lexists(dest):
        prev_state = "present"
    else:
        path = os.path.dirname(dest)
        if not os.path.exists(path):
            os.makedirs(path)

    # An existing file at dest is treated as already-satisfied; the MD5
    # check inside download() is only reached for new downloads.
    if prev_state == "present":
        module.exit_json(dest=dest, state=state, changed=False)

    try:
        if downloader.download(artifact, dest):
            module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True)
        else:
            module.fail_json(msg="Unable to download the artifact")
    except ValueError as e:
        module.fail_json(msg=e.args[0])
|
||||||
|
|
||||||
|
|
||||||
|
# import module snippets
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
from ansible.module_utils.urls import *
|
||||||
|
main()
|
@ -0,0 +1,163 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: pkg5
|
||||||
|
author: Peter Oliver
|
||||||
|
short_description: Manages packages with the Solaris 11 Image Packaging System
|
||||||
|
version_added: 1.9
|
||||||
|
description:
|
||||||
|
- IPS packages are the native packages in Solaris 11 and higher.
|
||||||
|
notes:
|
||||||
|
- The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- An FMRI of the package(s) to be installed/removed/updated.
|
||||||
|
- Multiple packages may be specified, separated by C(,).
|
||||||
|
required: true
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- Whether to install (I(present), I(latest)), or remove (I(absent)) a
|
||||||
|
package.
|
||||||
|
required: false
|
||||||
|
default: present
|
||||||
|
choices: [ present, latest, absent ]
|
||||||
|
accept_licenses:
|
||||||
|
description:
|
||||||
|
- Accept any licences.
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
choices: [ true, false ]
|
||||||
|
aliases: [ accept_licences, accept ]
|
||||||
|
'''
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Install Vim:
|
||||||
|
- pkg5: name=editor/vim
|
||||||
|
|
||||||
|
# Remove finger daemon:
|
||||||
|
- pkg5: name=service/network/finger state=absent
|
||||||
|
|
||||||
|
# Install several packages at once:
|
||||||
|
- pkg5:
|
||||||
|
name:
|
||||||
|
- /file/gnu-findutils
|
||||||
|
- /text/gnu-grep
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Ansible entry point: parse parameters and converge IPS package state."""
    module = AnsibleModule(
        argument_spec=dict(
            # One or more package FMRIs; AnsibleModule splits the
            # comma-separated string into a list for us.
            name=dict(required=True, type='list'),
            state=dict(
                default='present',
                choices=[
                    'present',
                    'installed',
                    'latest',
                    'absent',
                    'uninstalled',
                    'removed',
                ]
            ),
            accept_licenses=dict(
                choices=BOOLEANS,
                default=False,
                aliases=['accept_licences', 'accept'],
            ),
        )
    )

    params = module.params
    packages = []

    # pkg(5) FMRIs include a comma before the release number, but
    # AnsibleModule will have split this into multiple items for us.
    # Try to spot where this has happened and fix it: a fragment that
    # begins with a dotted number and follows an item still ending in an
    # '@version' (no comma yet) is really the continuation of that item.
    for fragment in params['name']:
        if (
            re.search('^\d+(?:\.\d+)*', fragment)
            and packages and re.search('@[^,]*$', packages[-1])
        ):
            packages[-1] += ',' + fragment
        else:
            packages.append(fragment)

    # Collapse the state aliases onto the three canonical behaviours.
    if params['state'] in ['present', 'installed']:
        ensure(module, 'present', packages, params)
    elif params['state'] in ['latest']:
        ensure(module, 'latest', packages, params)
    elif params['state'] in ['absent', 'uninstalled', 'removed']:
        ensure(module, 'absent', packages, params)
|
||||||
|
|
||||||
|
|
||||||
|
def ensure(module, state, packages, params):
    """Bring *packages* to *state* ('present'/'latest'/'absent') with pkg(1),
    then exit the module with the command's results."""
    response = {
        'results': [],
        'msg': '',
    }
    # For each target state: a predicate selecting packages that still need
    # action, and the pkg(1) subcommand that performs it.
    behaviour = {
        'present': {
            'filter': lambda p: not is_installed(module, p),
            'subcommand': 'install',
        },
        'latest': {
            'filter': lambda p: not is_latest(module, p),
            'subcommand': 'install',
        },
        'absent': {
            'filter': lambda p: is_installed(module, p),
            'subcommand': 'uninstall',
        },
    }

    needs_change = behaviour[state]['filter']
    to_modify = [pkg for pkg in packages if needs_change(pkg)]

    if to_modify:
        # Assemble: pkg <subcommand> [--accept] -q -- <packages...>
        command = ['pkg', behaviour[state]['subcommand']]
        if params['accept_licenses']:
            command.append('--accept')
        command.extend(['-q', '--'])
        command.extend(to_modify)

        rc, out, err = module.run_command(command)
        response['rc'] = rc
        response['results'].append(out)
        response['msg'] += err
        response['changed'] = True
        if rc != 0:
            module.fail_json(**response)

    module.exit_json(**response)
|
||||||
|
|
||||||
|
|
||||||
|
def is_installed(module, package):
    """Return True when *package* is installed (`pkg list` exits 0)."""
    rc, out, err = module.run_command(['pkg', 'list', '--', package])
    # The exit status already is the boolean answer; no ternary needed.
    return rc == 0
|
||||||
|
|
||||||
|
|
||||||
|
def is_latest(module, package):
    """Return True when *package* is already at its newest version.

    `pkg list -u` lists only packages with an update available, exiting 0
    when there is one; an exit status of 1 therefore means the installed
    package is up to date.
    """
    rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
    return rc == 1
|
||||||
|
|
||||||
|
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
main()
|
@ -0,0 +1,195 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: pkg5_publisher
|
||||||
|
author: Peter Oliver
|
||||||
|
short_description: Manages Solaris 11 Image Packaging System publishers
|
||||||
|
version_added: 1.9
|
||||||
|
description:
|
||||||
|
- IPS packages are the native packages in Solaris 11 and higher.
|
||||||
|
- This modules will configure which publishers a client will download IPS
|
||||||
|
packages from.
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- The publisher's name.
|
||||||
|
required: true
|
||||||
|
aliases: [ publisher ]
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- Whether to ensure that a publisher is present or absent.
|
||||||
|
required: false
|
||||||
|
default: present
|
||||||
|
choices: [ present, absent ]
|
||||||
|
sticky:
|
||||||
|
description:
|
||||||
|
- Packages installed from a sticky repository can only receive updates
|
||||||
|
from that repository.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
choices: [ true, false ]
|
||||||
|
enabled:
|
||||||
|
description:
|
||||||
|
- Is the repository enabled or disabled?
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
choices: [ true, false ]
|
||||||
|
origin:
|
||||||
|
description:
|
||||||
|
- A path or URL to the repository.
|
||||||
|
- Multiple values may be provided.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
mirror:
|
||||||
|
description:
|
||||||
|
- A path or URL to the repository mirror.
|
||||||
|
- Multiple values may be provided.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
'''
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Fetch packages for the solaris publisher direct from Oracle:
|
||||||
|
- pkg5_publisher: name=solaris sticky=true origin=https://pkg.oracle.com/solaris/support/
|
||||||
|
|
||||||
|
# Configure a publisher for locally-produced packages:
|
||||||
|
- pkg5_publisher: name=site origin=https://pkg.example.com/site/
|
||||||
|
'''
|
||||||
|
|
||||||
|
def main():
    """Ansible entry point: ensure an IPS publisher is present or absent."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, aliases=['publisher']),
            state=dict(default='present', choices=['present', 'absent']),
            sticky=dict(choices=BOOLEANS),
            enabled=dict(choices=BOOLEANS),
            # search_after=dict(),
            # search_before=dict(),
            origin=dict(type='list'),
            mirror=dict(type='list'),
        )
    )

    # An explicitly empty origin/mirror value arrives as [''] after list
    # splitting; normalize it to an empty list so "clear all URIs" works.
    for option in ['origin', 'mirror']:
        if module.params[option] == ['']:
            module.params[option] = []

    if module.params['state'] == 'present':
        modify_publisher(module, module.params)
    else:
        unset_publisher(module, module.params['name'])
|
||||||
|
|
||||||
|
|
||||||
|
def modify_publisher(module, params):
    """Create or update a publisher, invoking set_publisher only when the
    system configuration differs from the requested one."""
    name = params['name']
    existing = get_publishers(module)

    if name not in existing:
        # Unknown publisher: it must be created.
        return set_publisher(module, params)

    # Publisher exists: rewrite it only if some requested option differs.
    for option in ['origin', 'mirror', 'sticky', 'enabled']:
        wanted = params[option]
        if wanted is not None and wanted != existing[name][option]:
            return set_publisher(module, params)

    # Already configured as requested: report no change.
    module.exit_json()
|
||||||
|
|
||||||
|
|
||||||
|
def set_publisher(module, params):
    """Run `pkg set-publisher` with flags derived from *params* and exit."""
    name = params['name']
    args = []

    if params['origin'] is not None:
        # Replace the complete origin list rather than appending to it.
        args.append('--remove-origin=*')
        for uri in params['origin']:
            args.append('--add-origin=' + uri)
    if params['mirror'] is not None:
        args.append('--remove-mirror=*')
        for uri in params['mirror']:
            args.append('--add-mirror=' + uri)

    # Tri-state options: None means "leave the current setting alone".
    if params['sticky'] is not None:
        args.append('--sticky' if params['sticky'] else '--non-sticky')
    if params['enabled'] is not None:
        args.append('--enable' if params['enabled'] else '--disable')

    rc, out, err = module.run_command(
        ["pkg", "set-publisher"] + args + [name],
        check_rc=True
    )
    module.exit_json(rc=rc, results=[out], msg=err, changed=True)
|
||||||
|
|
||||||
|
|
||||||
|
def unset_publisher(module, publisher):
    """Remove *publisher* from the system; exits unchanged when it is not
    configured."""
    if publisher not in get_publishers(module):
        # Nothing to remove: report no change. (exit_json terminates here.)
        module.exit_json()

    rc, out, err = module.run_command(
        ["pkg", "unset-publisher", publisher],
        check_rc=True
    )
    module.exit_json(rc=rc, results=[out], msg=err, changed=True)
|
||||||
|
|
||||||
|
|
||||||
|
def get_publishers(module):
    """Parse `pkg publisher -Ftsv` into a dict keyed by publisher name, each
    entry holding 'sticky', 'enabled', 'origin' and 'mirror'."""
    rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)

    lines = out.splitlines()
    # The first TSV row is the header; its lowercased fields name the columns.
    keys = lines.pop(0).lower().split("\t")

    publishers = {}
    for row in lines:
        values = dict(zip(keys, [unstringify(cell) for cell in row.split("\t")]))
        name = values['publisher']

        if name not in publishers:
            entry = dict(
                (k, values[k]) for k in ['sticky', 'enabled']
            )
            entry['origin'] = []
            entry['mirror'] = []
            publishers[name] = entry

        # Each row contributes one URI, typed as 'origin' or 'mirror'.
        publishers[name][values['type']].append(values['uri'])

    return publishers
|
||||||
|
|
||||||
|
|
||||||
|
def unstringify(val):
    """Convert pkg(1) TSV tokens: '-' -> None, 'true'/'false' -> bool,
    anything else passes through unchanged."""
    specials = {"-": None, "true": True, "false": False}
    if val in specials:
        return specials[val]
    return val
|
||||||
|
|
||||||
|
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
main()
|
@ -0,0 +1,430 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
#
|
||||||
|
# Cronvar Plugin: The goal of this plugin is to provide an indempotent
|
||||||
|
# method for set cron variable values. It should play well with the
|
||||||
|
# existing cron module as well as allow for manually added variables.
|
||||||
|
# Each variable entered will be preceded with a comment describing the
|
||||||
|
# variable so that it can be found later. This is required to be
|
||||||
|
# present in order for this plugin to find/modify the variable
|
||||||
|
#
|
||||||
|
# This module is based on the crontab module.
|
||||||
|
#
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
---
|
||||||
|
module: cronvar
|
||||||
|
short_description: Manage variables in crontabs
|
||||||
|
description:
|
||||||
|
- Use this module to manage crontab variables. This module allows
|
||||||
|
you to create, update, or delete cron variable definitions.
|
||||||
|
version_added: "2.0"
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Name of the crontab variable.
|
||||||
|
default: null
|
||||||
|
required: true
|
||||||
|
value:
|
||||||
|
description:
|
||||||
|
- The value to set this variable to. Required if state=present.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
insertafter:
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
description:
|
||||||
|
- Used with C(state=present). If specified, the variable will be inserted
|
||||||
|
after the variable specified.
|
||||||
|
insertbefore:
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
description:
|
||||||
|
- Used with C(state=present). If specified, the variable will be inserted
|
||||||
|
just before the variable specified.
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- Whether to ensure that the variable is present or absent.
|
||||||
|
required: false
|
||||||
|
default: present
|
||||||
|
choices: [ "present", "absent" ]
|
||||||
|
user:
|
||||||
|
description:
|
||||||
|
- The specific user whose crontab should be modified.
|
||||||
|
required: false
|
||||||
|
default: root
|
||||||
|
cron_file:
|
||||||
|
description:
|
||||||
|
- If specified, uses this file in cron.d instead of an individual user's crontab.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
backup:
|
||||||
|
description:
|
||||||
|
- If set, create a backup of the crontab before it is modified.
|
||||||
|
The location of the backup is returned in the C(backup) variable by this module.
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
requirements:
|
||||||
|
- cron
|
||||||
|
author: Doug Luce
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Ensure a variable exists.
|
||||||
|
# Creates an entry like "EMAIL=doug@ansibmod.con.com"
|
||||||
|
- cronvar: name="EMAIL" value="doug@ansibmod.con.com"
|
||||||
|
|
||||||
|
# Make sure a variable is gone. This will remove any variable named
|
||||||
|
# "LEGACY"
|
||||||
|
- cronvar: name="LEGACY" state=absent
|
||||||
|
|
||||||
|
# Adds a variable to a file under /etc/cron.d
|
||||||
|
- cronvar: name="LOGFILE" value="/var/log/yum-autoupdate.log"
|
||||||
|
user="root" cron_file=ansible_yum-autoupdate
|
||||||
|
'''
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import tempfile
|
||||||
|
import platform
|
||||||
|
import pipes
|
||||||
|
import shlex
|
||||||
|
|
||||||
|
# Default location of the crontab binary used for reading/writing crontabs.
CRONCMD = "/usr/bin/crontab"

# shlex.quote is the Python 3 name for pipes.quote; the pipes module was
# removed in Python 3.13, so prefer shlex and fall back for Python 2.
try:
    from shlex import quote as _shquote
except ImportError:  # Python 2
    from pipes import quote as _shquote


class CronVarError(Exception):
    """Raised for any problem reading, parsing or writing crontab variables."""
    pass


class CronVar(object):
    """
    CronVar object to write variables to crontabs.

    user - the user of the crontab (defaults to root)
    cron_file - a cron file under /etc/cron.d
    """

    def __init__(self, module, user=None, cron_file=None):
        self.module = module
        self.user = user if user is not None else 'root'
        self.lines = None
        # Characters shlex treats as part of a word when tokenizing "NAME=value":
        # everything printable except '=', single and double quotes.
        self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"'))
        # Select whether we dump additional debug info through syslog.
        self.syslogging = False

        if cron_file:
            self.cron_file = '/etc/cron.d/%s' % cron_file
        else:
            self.cron_file = None

        self.read()

    def read(self):
        """Load the crontab (cron.d file or user crontab) into self.lines."""
        self.lines = []
        if self.cron_file:
            try:
                f = open(self.cron_file, 'r')
                try:
                    self.lines = f.read().splitlines()
                finally:
                    # Close even if read() raises (original leaked the handle).
                    f.close()
            except IOError:
                # cron file does not exist yet; treat as an empty crontab
                return
            except Exception as exc:
                # Originally re-raised via sys.exc_info(), but sys is not
                # imported by this module; carry the message instead.
                raise CronVarError("Unexpected error: %s" % exc)
        else:
            # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
            (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)

            if rc != 0 and rc != 1:  # 1 can mean that there are no jobs.
                raise CronVarError("Unable to read crontab")

            count = 0
            for line in out.splitlines():
                # Skip the up-to-three header lines some crontab
                # implementations prepend to the listing.
                if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', line) and
                                 not re.match(r'# \(/tmp/.*installed on.*\)', line) and
                                 not re.match(r'# \(.*version.*\)', line)):
                    self.lines.append(line)
                count += 1

    def log_message(self, message):
        # NOTE(review): 'syslog' is assumed to be provided by the wildcard
        # import from ansible.module_utils.basic — confirm.
        if self.syslogging:
            syslog.syslog(syslog.LOG_NOTICE, 'ansible: "%s"' % message)

    def write(self, backup_file=None):
        """
        Write the crontab to the system. Saves all information.
        """
        if backup_file:
            fileh = open(backup_file, 'w')
        elif self.cron_file:
            fileh = open(self.cron_file, 'w')
        else:
            filed, path = tempfile.mkstemp(prefix='crontab')
            fileh = os.fdopen(filed, 'w')

        fileh.write(self.render())
        fileh.close()

        # Return early if we were only asked for a backup copy.
        if backup_file:
            return

        # Add the entire crontab back to the user crontab.
        if not self.cron_file:
            # quoting shell args for now but really this should be two non-shell calls. FIXME
            (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
            os.unlink(path)

            if rc != 0:
                self.module.fail_json(msg=err)

    def remove_variable_file(self):
        """Delete the whole cron.d file; return True if it was removed."""
        try:
            os.unlink(self.cron_file)
            return True
        except OSError:
            # cron file does not exist
            return False
        except Exception as exc:
            raise CronVarError("Unexpected error: %s" % exc)

    def parse_for_var(self, line):
        """Parse a crontab line as NAME=value; raise CronVarError otherwise."""
        lexer = shlex.shlex(line)
        lexer.wordchars = self.wordchars
        varname = lexer.get_token()
        is_env_var = lexer.get_token() == '='
        value = ''.join(lexer)
        if is_env_var:
            return (varname, value)
        raise CronVarError("Not a variable.")

    def find_variable(self, name):
        """Return the value of variable *name*, or None if it is not set."""
        for line in self.lines:
            try:
                (varname, value) = self.parse_for_var(line)
                if varname == name:
                    return value
            except CronVarError:
                pass
        return None

    def get_var_names(self):
        """Return the names of all variables defined in the crontab."""
        var_names = []
        for line in self.lines:
            try:
                (var_name, _) = self.parse_for_var(line)
                var_names.append(var_name)
            except CronVarError:
                pass
        return var_names

    def add_variable(self, name, value, insertbefore, insertafter):
        """Insert NAME=value, optionally before/after another variable."""
        if insertbefore is None and insertafter is None:
            # Add the variable to the top of the file.
            self.lines.insert(0, "%s=%s" % (name, value))
        else:
            newlines = []
            for line in self.lines:
                try:
                    (varname, _) = self.parse_for_var(line)  # Throws if not a var line
                    if varname == insertbefore:
                        newlines.append("%s=%s" % (name, value))
                        newlines.append(line)
                    elif varname == insertafter:
                        newlines.append(line)
                        newlines.append("%s=%s" % (name, value))
                    else:
                        raise CronVarError  # Append unchanged.
                except CronVarError:
                    newlines.append(line)

            self.lines = newlines

    def remove_variable(self, name):
        """Delete variable *name* from the crontab."""
        self.update_variable(name, None, remove=True)

    def update_variable(self, name, value, remove=False):
        """Replace (or, with remove=True, delete) variable *name*."""
        newlines = []
        for line in self.lines:
            try:
                (varname, _) = self.parse_for_var(line)  # Throws if not a var line
                if varname != name:
                    raise CronVarError  # Append unchanged.
                if not remove:
                    newlines.append("%s=%s" % (name, value))
            except CronVarError:
                newlines.append(line)

        self.lines = newlines

    def render(self):
        """
        Render a proper (newline-terminated) crontab.
        """
        result = '\n'.join(self.lines)
        if result and result[-1] not in ['\n', '\r']:
            result += '\n'
        return result

    def _read_user_execute(self):
        """
        Returns the command line for reading a crontab.
        """
        user = ''
        if self.user:
            if platform.system() == 'SunOS':
                return "su %s -c '%s -l'" % (_shquote(self.user), _shquote(CRONCMD))
            elif platform.system() == 'AIX':
                return "%s -l %s" % (_shquote(CRONCMD), _shquote(self.user))
            elif platform.system() == 'HP-UX':
                return "%s %s %s" % (CRONCMD, '-l', _shquote(self.user))
            else:
                user = '-u %s' % _shquote(self.user)
        return "%s %s %s" % (CRONCMD, user, '-l')

    def _write_execute(self, path):
        """
        Return the command line for writing a crontab.
        """
        user = ''
        if self.user:
            if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
                return "chown %s %s ; su '%s' -c '%s %s'" % (_shquote(self.user), _shquote(path), _shquote(self.user), CRONCMD, _shquote(path))
            else:
                user = '-u %s' % _shquote(self.user)
        return "%s %s %s" % (CRONCMD, user, _shquote(path))
|
||||||
|
|
||||||
|
#==================================================
|
||||||
|
|
||||||
|
def main():
    """Insert, update or remove an environment variable in a crontab.

    Example tasks:
        - cronvar: name="SHELL" value="/bin/bash"
        - cronvar: name="EMAILTO" value="doug@ansibmod.con.com"
        - cronvar: name="NEW_HOST" state=absent
    would produce crontab lines such as:
        SHELL=/bin/bash
        EMAILTO=doug@ansibmod.con.com
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            value=dict(required=False),
            user=dict(required=False),
            cron_file=dict(required=False),
            insertafter=dict(default=None),
            insertbefore=dict(default=None),
            state=dict(default='present', choices=['present', 'absent']),
            backup=dict(default=False, type='bool'),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        supports_check_mode=False,
    )

    name = module.params['name']
    value = module.params['value']
    user = module.params['user']
    cron_file = module.params['cron_file']
    insertafter = module.params['insertafter']
    insertbefore = module.params['insertbefore']
    state = module.params['state']
    backup = module.params['backup']
    ensure_present = state == 'present'

    changed = False
    res_args = dict()

    # Ensure all files generated are only writable by the owning user.
    # Primarily relevant for the cron_file option.
    os.umask(0o022)  # BUGFIX: '022' is a Python-2-only octal literal
    cronvar = CronVar(module, user, cron_file)

    if cronvar.syslogging:
        # NOTE(review): 'syslog' is assumed to come from the wildcard import
        # of ansible.module_utils.basic — confirm.
        syslog.openlog('ansible-%s' % os.path.basename(__file__))
        syslog.syslog(syslog.LOG_NOTICE, 'cronvar instantiated - name: "%s"' % name)

    # --- user input validation ---

    # BUGFIX: message typo "variabale" corrected.
    if name is None and ensure_present:
        module.fail_json(msg="You must specify 'name' to insert a new cron variable")

    if value is None and ensure_present:
        module.fail_json(msg="You must specify 'value' to insert a new cron variable")

    if name is None and not ensure_present:
        module.fail_json(msg="You must specify 'name' to remove a cron variable")

    # If requested, make a backup before making a change.
    if backup:
        (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
        cronvar.write(backup_file)

    if cronvar.cron_file and not name and not ensure_present:
        # BUGFIX: the original called cronvar.remove_job_file(), a method that
        # does not exist on CronVar (copy/paste from the cron module); the
        # correct method is remove_variable_file().
        changed = cronvar.remove_variable_file()
        module.exit_json(changed=changed, cron_file=cron_file, state=state)

    old_value = cronvar.find_variable(name)

    if ensure_present:
        if old_value is None:
            cronvar.add_variable(name, value, insertbefore, insertafter)
            changed = True
        elif old_value != value:
            cronvar.update_variable(name, value)
            changed = True
    else:
        if old_value is not None:
            cronvar.remove_variable(name)
            changed = True

    res_args = {
        "vars": cronvar.get_var_names(),
        "changed": changed
    }

    if changed:
        cronvar.write()

    # Retain the backup only if the crontab or cron file actually changed.
    if backup:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)

    if cron_file:
        res_args['cron_file'] = cron_file

    module.exit_json(**res_args)

    # --- should never get here
    module.exit_json(msg="Unable to execute cronvar task.")
|
||||||
|
|
||||||
|
# import module snippets
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
|
||||||
|
main()
|
@ -0,0 +1,254 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
"""
|
||||||
|
Ansible module to manage the ssh known_hosts file.
|
||||||
|
Copyright(c) 2014, Matthew Vernon <mcv21@cam.ac.uk>
|
||||||
|
|
||||||
|
This module is free software: you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation, either version 3 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This module is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License
|
||||||
|
along with this module. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
"""
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: known_hosts
|
||||||
|
short_description: Add or remove a host from the C(known_hosts) file
|
||||||
|
description:
|
||||||
|
- The M(known_hosts) module lets you add or remove a host from the C(known_hosts) file.
|
||||||
|
This is useful if you're going to want to use the M(git) module over ssh, for example.
|
||||||
|
If you have a very large number of host keys to manage, you will find the M(template) module more useful.
|
||||||
|
version_added: "1.9"
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
aliases: [ 'host' ]
|
||||||
|
description:
|
||||||
|
- The host to add or remove (must match a host specified in key)
|
||||||
|
required: true
|
||||||
|
default: null
|
||||||
|
key:
|
||||||
|
description:
|
||||||
|
- The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed)
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
path:
|
||||||
|
description:
|
||||||
|
- The known_hosts file to edit
|
||||||
|
required: no
|
||||||
|
default: "(homedir)+/.ssh/known_hosts"
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- I(present) to add the host, I(absent) to remove it.
|
||||||
|
choices: [ "present", "absent" ]
|
||||||
|
required: no
|
||||||
|
default: present
|
||||||
|
requirements: [ ]
|
||||||
|
author: Matthew Vernon
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Example using with_file to set the system known_hosts file
|
||||||
|
- name: tell the host about our servers it might want to ssh to
|
||||||
|
known_hosts: path='/etc/ssh/ssh_known_hosts'
|
||||||
|
host='foo.com.invalid'
|
||||||
|
key="{{ lookup('file', 'pubkeys/foo.com.invalid') }}"
|
||||||
|
'''
|
||||||
|
|
||||||
|
# Makes sure public host keys are present or absent in the given known_hosts
|
||||||
|
# file.
|
||||||
|
#
|
||||||
|
# Arguments
|
||||||
|
# =========
|
||||||
|
# name = hostname whose key should be added (alias: host)
|
||||||
|
# key = line(s) to add to known_hosts file
|
||||||
|
# path = the known_hosts file to edit (default: ~/.ssh/known_hosts)
|
||||||
|
# state = absent|present (default: present)
|
||||||
|
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import tempfile
|
||||||
|
import errno
|
||||||
|
|
||||||
|
def enforce_state(module, params):
    """
    Add or remove a host key from the known_hosts file.

    Returns the params dict with 'changed' set appropriately; calls
    module.fail_json() (which does not return) on any error.
    """
    host = params["name"]
    key = params.get("key", None)
    # (The original also read params["port"], which was never used; dropped.)
    # Expand the path parameter; otherwise module.add_path_info
    # (called by exit_json) unhelpfully says the unexpanded path is absent.
    path = os.path.expanduser(params.get("path"))
    state = params.get("state")
    # Find the ssh-keygen binary (required=True makes failure fatal).
    sshkeygen = module.get_bin_path("ssh-keygen", True)

    # Trailing newline in files gets lost, so re-add if necessary.
    if key is not None and key[-1] != '\n':
        key += '\n'

    if key is None and state != "absent":
        module.fail_json(msg="No key specified when adding a host")

    sanity_check(module, host, key, sshkeygen)

    current, replace = search_for_host_key(module, host, key, path, sshkeygen)

    # We will change state if current==True & state!="present",
    # or current==False & state=="present", i.e. (current) XOR (state=="present").
    # Alternatively, if replace is true (i.e. key present, and we must change it).
    if module.check_mode:
        module.exit_json(changed=replace or ((state == "present") != current))

    # Now do the work.

    # First, remove an extant entry if required.
    if replace or (current and state == "absent"):
        module.run_command([sshkeygen, '-R', host, '-f', path],
                           check_rc=True)
        params['changed'] = True
    # Next, add a new (or replacing) entry.
    if replace or (not current and state == "present"):
        try:
            inf = open(path, "r")
        except IOError as e:
            if e.errno == errno.ENOENT:
                # No existing known_hosts file; we will create it.
                inf = None
            else:
                module.fail_json(msg="Failed to read %s: %s" %
                                     (path, str(e)))
        try:
            # Copy the existing file plus the new key into a temp file, then
            # atomically move it into place.
            outf = tempfile.NamedTemporaryFile(dir=os.path.dirname(path))
            if inf is not None:
                for line in inf:
                    outf.write(line)
                inf.close()
            outf.write(key)
            outf.flush()
            module.atomic_move(outf.name, path)
        except (IOError, OSError) as e:
            module.fail_json(msg="Failed to write to file %s: %s" %
                                 (path, str(e)))

        try:
            outf.close()
        except Exception:
            # atomic_move may already have consumed the file; best-effort.
            pass

        params['changed'] = True

    return params
|
||||||
|
|
||||||
|
def sanity_check(module, host, key, sshkeygen):
    '''Check that the supplied key is sensible.

    host and key are parameters provided by the user; if the host
    provided is inconsistent with the key supplied, then this function
    quits (via module.fail_json), providing an error to the user.
    sshkeygen is the path to ssh-keygen, found earlier with get_bin_path.
    '''
    # If no key supplied, we're doing a removal, and have nothing to check here.
    if key is None:
        return
    # Rather than parsing the key ourselves, get ssh-keygen to do it
    # (this is essential for hashed keys, but otherwise useful, as the
    # key question is whether ssh-keygen thinks the key matches the host).

    # The approach is to write the key to a temporary file,
    # and then attempt to look up the specified host in that file.
    try:
        # BUGFIX: open the temp file in text mode; the default 'w+b' makes
        # outf.write(key) fail with a str key under Python 3.
        outf = tempfile.NamedTemporaryFile(mode='w+')
        outf.write(key)
        outf.flush()
    except IOError as e:
        module.fail_json(msg="Failed to write to temporary file %s: %s" %
                             (outf.name, str(e)))
    rc, stdout, stderr = module.run_command([sshkeygen, '-F', host,
                                             '-f', outf.name],
                                            check_rc=True)
    try:
        outf.close()
    except Exception:
        pass

    if stdout == '':  # host not found
        module.fail_json(msg="Host parameter does not match hashed host field in supplied key")
|
||||||
|
|
||||||
|
def search_for_host_key(module, host, key, path, sshkeygen):
    '''search_for_host_key(module, host, key, path, sshkeygen) -> (current, replace)

    Looks up host in the known_hosts file path; if it's there, looks to see
    if one of those entries matches key. Returns:
    current (Boolean): is host found in path?
    replace (Boolean): is the key in path different to that supplied by user?
    If current is False, then replace is always False.
    sshkeygen is the path to ssh-keygen, found earlier with get_bin_path.
    '''
    # Idiom fix: truthiness instead of "== False" comparison.
    if not os.path.exists(path):
        return False, False
    # openssh >= 6.4 has changed ssh-keygen behaviour such that it returns
    # 1 if no host is found, whereas previously it returned 0.
    rc, stdout, stderr = module.run_command([sshkeygen, '-F', host, '-f', path],
                                            check_rc=False)
    if stdout == '' and stderr == '' and (rc == 0 or rc == 1):
        return False, False  # host not found, no other errors
    if rc != 0:  # something went wrong
        module.fail_json(msg="ssh-keygen failed (rc=%d,stdout='%s',stderr='%s')" % (rc, stdout, stderr))

    # If user supplied no key, we don't want to try and replace anything with it.
    if key is None:
        return True, False

    lines = stdout.split('\n')
    k = key.strip()  # trim trailing newline
    # ssh-keygen returns only the host we ask about in the host field,
    # even if the key entry has multiple hosts. Emulate this behaviour here,
    # otherwise we get false negatives.
    # Only necessary for unhashed entries.
    if k[0] != '|':
        fields = k.split()
        # The optional "marker" field, used for @cert-authority or @revoked.
        if fields[0][0] == '@':
            fields[1] = host
        else:
            fields[0] = host
        k = ' '.join(fields)
    for line in lines:
        if line == '':
            continue
        if line[0] == '#':  # comment
            continue
        if k == line:  # found a match
            return True, False  # current, not-replace
    # No match found, return current and replace.
    return True, True
|
||||||
|
|
||||||
|
def main():
    # Entry point: declare the module interface, then delegate all real work
    # to enforce_state(), reporting its result dict back to Ansible.

    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True, type='str', aliases=['host']),
            key = dict(required=False, type='str'),
            path = dict(default="~/.ssh/known_hosts", type='str'),
            state = dict(default='present', choices=['absent','present']),
            ),
        supports_check_mode = True
        )

    # enforce_state returns the params dict with 'changed' set appropriately.
    results = enforce_state(module,module.params)
    module.exit_json(**results)
|
||||||
|
|
||||||
|
# import module snippets
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
main()
|
@ -0,0 +1,277 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: svc
|
||||||
|
author: Brian Coca
|
||||||
|
version_added:
|
||||||
|
short_description: Manage daemontools services.
|
||||||
|
description:
|
||||||
|
- Controls daemontools services on remote hosts using the svc utility.
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
required: true
|
||||||
|
description:
|
||||||
|
- Name of the service to manage.
|
||||||
|
state:
|
||||||
|
required: false
|
||||||
|
choices: [ started, stopped, restarted, reloaded, once ]
|
||||||
|
description:
|
||||||
|
- C(Started)/C(stopped) are idempotent actions that will not run
|
||||||
|
commands unless necessary. C(restarted) will always bounce the
|
||||||
|
svc (svc -t) and C(killed) will always bounce the svc (svc -k).
|
||||||
|
C(reloaded) will send a sigusr1 (svc -u).
|
||||||
|
C(once) will run a normally downed svc once (svc -o), not really
|
||||||
|
an idempotent operation.
|
||||||
|
downed:
|
||||||
|
required: false
|
||||||
|
choices: [ "yes", "no" ]
|
||||||
|
default: no
|
||||||
|
description:
|
||||||
|
- Should a 'down' file exist or not, if it exists it disables auto startup.
|
||||||
|
defaults to no. Downed does not imply stopped.
|
||||||
|
enabled:
|
||||||
|
required: false
|
||||||
|
choices: [ "yes", "no" ]
|
||||||
|
description:
|
||||||
|
- Whether the service is enabled or not, if disabled it also implies stopped.
|
||||||
|
Make note that a service can be enabled and downed (no auto restart).
|
||||||
|
service_dir:
|
||||||
|
required: false
|
||||||
|
default: /service
|
||||||
|
description:
|
||||||
|
- directory svscan watches for services
|
||||||
|
service_src:
|
||||||
|
required: false
|
||||||
|
description:
|
||||||
|
- directory where services are defined, the source of symlinks to service_dir.
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Example action to start svc dnscache, if not running
|
||||||
|
- svc: name=dnscache state=started
|
||||||
|
|
||||||
|
# Example action to stop svc dnscache, if running
|
||||||
|
- svc: name=dnscache state=stopped
|
||||||
|
|
||||||
|
# Example action to kill svc dnscache, in all cases
|
||||||
|
- svc : name=dnscache state=killed
|
||||||
|
|
||||||
|
# Example action to restart svc dnscache, in all cases
|
||||||
|
- svc : name=dnscache state=restarted
|
||||||
|
|
||||||
|
# Example action to reload svc dnscache, in all cases
|
||||||
|
- svc: name=dnscache state=reloaded
|
||||||
|
|
||||||
|
# Example using alt svc directory location
|
||||||
|
- svc: name=dnscache state=reloaded service_dir=/var/service
|
||||||
|
'''
|
||||||
|
|
||||||
|
import platform
|
||||||
|
import shlex
|
||||||
|
|
||||||
|
def _load_dist_subclass(cls, *args, **kwargs):
|
||||||
|
'''
|
||||||
|
Used for derivative implementations
|
||||||
|
'''
|
||||||
|
subclass = None
|
||||||
|
|
||||||
|
distro = kwargs['module'].params['distro']
|
||||||
|
|
||||||
|
# get the most specific superclass for this platform
|
||||||
|
if distro is not None:
|
||||||
|
for sc in cls.__subclasses__():
|
||||||
|
if sc.distro is not None and sc.distro == distro:
|
||||||
|
subclass = sc
|
||||||
|
if subclass is None:
|
||||||
|
subclass = cls
|
||||||
|
|
||||||
|
return super(cls, subclass).__new__(subclass)
|
||||||
|
|
||||||
|
class Svc(object):
    """
    Main class that handles daemontools; can be subclassed and overridden in
    case we want to use a 'derivative' like encore, s6, etc.
    """

    #def __new__(cls, *args, **kwargs):
    #    return _load_dist_subclass(cls, args, kwargs)

    def __init__(self, module):
        # Extra locations searched for the svc/svstat binaries.
        self.extra_paths = ['/command', '/usr/local/bin']
        # Attribute names exposed by report().
        self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']

        self.module = module

        self.name = module.params['name']
        self.service_dir = module.params['service_dir']
        self.service_src = module.params['service_src']
        self.enabled = None
        self.downed = None
        self.full_state = None
        self.state = None
        self.pid = None
        self.duration = None

        # NOTE(review): 'os' and 're' are not imported by this module; they
        # are assumed to arrive via the wildcard import from
        # ansible.module_utils.basic — confirm.
        self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
        self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
        self.svc_full = '/'.join([self.service_dir, self.name])
        self.src_full = '/'.join([self.service_src, self.name])

        # A service is "enabled" when its symlink exists in the service dir.
        self.enabled = os.path.lexists(self.svc_full)
        if self.enabled:
            self.downed = os.path.lexists('%s/down' % self.svc_full)
            self.get_status()
        else:
            self.downed = os.path.lexists('%s/down' % self.src_full)
            self.state = 'stopped'

    def enable(self):
        """Enable the service by symlinking it into the service directory."""
        if os.path.exists(self.src_full):
            try:
                os.symlink(self.src_full, self.svc_full)
            except OSError as e:
                self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e))
        else:
            self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)

    def disable(self):
        """Remove the service symlink and bring the service (and its log) down."""
        try:
            os.unlink(self.svc_full)
        except OSError as e:
            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e))
        self.execute_command([self.svc_cmd, '-dx', self.src_full])

        src_log = '%s/log' % self.src_full
        if os.path.exists(src_log):
            self.execute_command([self.svc_cmd, '-dx', src_log])

    def get_status(self):
        """Parse `svstat` output into state/pid/duration/full_state."""
        (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])

        if err is not None and err:
            self.full_state = self.state = err
        else:
            self.full_state = out

            m = re.search('\(pid (\d+)\)', out)
            if m:
                self.pid = m.group(1)

            m = re.search('(\d+) seconds', out)
            if m:
                self.duration = m.group(1)

            # 'start'/'stopp' are stems: ' want ' appends 'ing', else 'ed',
            # yielding started/starting/stopped/stopping (hence the stopp()
            # method alias below).
            if re.search(' up ', out):
                self.state = 'start'
            elif re.search(' down ', out):
                self.state = 'stopp'
            else:
                self.state = 'unknown'
                return

            if re.search(' want ', out):
                self.state += 'ing'
            else:
                self.state += 'ed'

    def start(self):
        return self.execute_command([self.svc_cmd, '-u', self.svc_full])

    def stopp(self):
        # Alias matching the 'stopp' stem produced by get_status().
        return self.stop()

    def stop(self):
        return self.execute_command([self.svc_cmd, '-d', self.svc_full])

    def once(self):
        return self.execute_command([self.svc_cmd, '-o', self.svc_full])

    def reload(self):
        return self.execute_command([self.svc_cmd, '-1', self.svc_full])

    def restart(self):
        return self.execute_command([self.svc_cmd, '-t', self.svc_full])

    def kill(self):
        return self.execute_command([self.svc_cmd, '-k', self.svc_full])

    def execute_command(self, cmd):
        """Run cmd (a list) through the module, failing the module on error."""
        try:
            (rc, out, err) = self.module.run_command(' '.join(cmd))
        except Exception as e:
            self.module.fail_json(msg="failed to execute: %s" % str(e))
        return (rc, out, err)

    def report(self):
        """Refresh status and return a dict of the reportable attributes."""
        self.get_status()
        states = {}
        for k in self.report_vars:
            states[k] = self.__dict__[k]
        return states
|
||||||
|
|
||||||
|
# ===========================================
|
||||||
|
# Main control flow
|
||||||
|
|
||||||
|
def main():
    """Entry point: manage a daemontools service's enabled/downed/run state."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']),
            enabled = dict(required=False, type='bool', choices=BOOLEANS),
            downed = dict(required=False, type='bool', choices=BOOLEANS),
            dist = dict(required=False, default='daemontools'),
            service_dir = dict(required=False, default='/service'),
            service_src = dict(required=False, default='/etc/service'),
        ),
        supports_check_mode=True,
    )

    state = module.params['state']
    enabled = module.params['enabled']
    downed = module.params['downed']

    svc = Svc(module)
    changed = False
    # Refreshes status via report(); value itself is currently unused.
    orig_state = svc.report()

    if enabled is not None and enabled != svc.enabled:
        changed = True
        if not module.check_mode:
            try:
                if enabled:
                    svc.enable()
                else:
                    svc.disable()
            except (OSError, IOError) as e:
                # BUGFIX: message previously read "Could change service link".
                module.fail_json(msg="Could not change service link: %s" % str(e))

    if state is not None and state != svc.state:
        changed = True
        if not module.check_mode:
            # Map the requested state to an Svc method: 'started' -> start,
            # 'stopped' -> stopp, 'restarted' -> restart, 'killed' -> kill,
            # 'reloaded' -> reload.  BUGFIX: 'once'[:-2] yields 'on', which is
            # not a method, so dispatch 'once' directly.
            action = state if state == 'once' else state[:-2]
            getattr(svc, action)()

    if downed is not None and downed != svc.downed:
        changed = True
        if not module.check_mode:
            d_file = "%s/down" % svc.svc_full
            try:
                if downed:
                    # An empty 'down' file disables auto-start.
                    open(d_file, "a").close()
                else:
                    os.unlink(d_file)
            except (OSError, IOError) as e:
                # BUGFIX: message previously read "Could change downed file".
                module.fail_json(msg="Could not change downed file: %s " % (str(e)))

    module.exit_json(changed=changed, svc=svc.report())
|
||||||
|
|
||||||
|
|
||||||
|
# this is magic, not normal python include
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
|
||||||
|
main()
|
Loading…
Reference in New Issue