diff --git a/README.md b/README.md
index 5d9c47f8303..9a0ddb6c898 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ All new modules should be submitted here, and have a chance to be promoted to co
 Reporting bugs
 ==============
 
-Take care to submit tickets to the appropriate repo where modules are contained. The repo is mentioned at the bottom of modlue documentation page at [docs.ansible.com](http://docs.ansible.com/).
+Take care to submit tickets to the appropriate repo where modules are contained. The repo is mentioned at the bottom of the module documentation page at [docs.ansible.com](http://docs.ansible.com/).
 
 Testing modules
 ===============
@@ -18,4 +18,9 @@ Ansible [module development guide](http://docs.ansible.com/developing_modules.ht
 License
 =======
 
-As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license.
+As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license.
+
+Installation
+============
+
+There should be no need to install this repo separately, as it is included in any Ansible install that uses the official documented methods.
diff --git a/cloud/amazon/__init__.py b/cloud/amazon/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py
new file mode 100755
index 00000000000..b58bcd6e1d0
--- /dev/null
+++ b/cloud/amazon/cloudtrail.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: cloudtrail
+short_description: manage CloudTrail creation and deletion
+description:
+  - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. This module has a dependency on python-boto >= 2.21.
+version_added: "2.0"
+author: Ted Timmons
+options:
+  state:
+    description:
+      - add or remove CloudTrail configuration.
+    required: true
+    choices: ['enabled', 'disabled']
+  name:
+    description:
+      - name for the given CloudTrail configuration.
+      - This is a primary key and is used to identify the configuration.
+    required: true
+  s3_bucket_name:
+    description:
+      - bucket to place CloudTrail logs in.
+      - this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
+      - required when state=enabled.
+    required: false
+  s3_key_prefix:
+    description:
+      - prefix to keys in the bucket. A trailing slash is not necessary and will be removed.
+    required: false
+  include_global_events:
+    description:
+      - record API calls from global services such as IAM and STS?
+    required: false
+    default: false
+    choices: ["true", "false"]
+
+  aws_secret_key:
+    description:
+      - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+    required: false
+    default: null
+    aliases: [ 'ec2_secret_key', 'secret_key' ]
+    version_added: "1.5"
+  aws_access_key:
+    description:
+      - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+    required: false
+    default: null
+    aliases: [ 'ec2_access_key', 'access_key' ]
+    version_added: "1.5"
+  region:
+    description:
+      - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
+    required: false
+    aliases: ['aws_region', 'ec2_region']
+    version_added: "1.5"
+
+extends_documentation_fragment: aws
+"""
+
+EXAMPLES = """
+  - name: enable cloudtrail
+    local_action: cloudtrail
+      state=enabled name=main s3_bucket_name=ourbucket
+      s3_key_prefix=cloudtrail region=us-east-1
+
+  - name: enable cloudtrail with different configuration
+    local_action: cloudtrail
+      state=enabled name=main s3_bucket_name=ourbucket2
+      s3_key_prefix='' region=us-east-1
+
+  - name: remove cloudtrail
+    local_action: cloudtrail state=disabled name=main region=us-east-1
+"""
+
+boto_import_failed = False
+try:
+    import boto
+    import boto.cloudtrail
+    from boto.regioninfo import RegionInfo
+except ImportError:
+    boto_import_failed = True
+
+class CloudTrailManager:
+    """Handles cloudtrail configuration"""
+
+    def __init__(self, module, region=None, **aws_connect_params):
+        self.module = module
+        self.region = region
+        self.aws_connect_params = aws_connect_params
+        self.changed = False
+
+        try:
+            self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params)
+        except boto.exception.NoAuthHandlerFound, e:
+            self.module.fail_json(msg=str(e))
+
+    def view_status(self, name):
+        return self.conn.get_trail_status(name)
+
+    def view(self, name):
+        ret = self.conn.describe_trails(trail_name_list=[name])
+        trailList = ret.get('trailList', [])
+        if len(trailList) == 1:
+            return trailList[0]
+        return None
+
+    def exists(self, name=None):
+        ret = self.view(name)
+        if ret:
+            return True
+        return False
+
+    def enable_logging(self, name):
+        '''Turn on logging for a cloudtrail that already exists. Throws Exception on error.'''
+        self.conn.start_logging(name)
+
+
+    def enable(self, **create_args):
+        return self.conn.create_trail(**create_args)
+
+    def update(self, **create_args):
+        return self.conn.update_trail(**create_args)
+
+    def delete(self, name):
+        '''Delete a given cloudtrail configuration.
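+        Log files already delivered to the S3 bucket are not removed.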
+        Throws Exception on error.'''
+        self.conn.delete_trail(name)
+
+
+
+def main():
+
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state={'required': True, 'choices': ['enabled', 'disabled'] },
+        name={'required': True, 'type': 'str' },
+        s3_bucket_name={'required': False, 'type': 'str' },
+        s3_key_prefix={'default':'', 'required': False, 'type': 'str' },
+        include_global_events={'default':True, 'required': False, 'type': 'bool' },
+    ))
+    required_together = [ ['state', 's3_bucket_name'] ]
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)
+
+    if boto_import_failed:
+        module.fail_json(msg='boto is required.')
+
+    ec2_url, access_key, secret_key, region = get_ec2_creds(module)
+    aws_connect_params = dict(aws_access_key_id=access_key,
+                              aws_secret_access_key=secret_key)
+
+    if not region:
+        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+
+    ct_name = module.params['name']
+    s3_bucket_name = module.params['s3_bucket_name']
+    # remove trailing slash from the key prefix, it really messes up the key structure.
+    s3_key_prefix = module.params['s3_key_prefix'].rstrip('/')
+    include_global_events = module.params['include_global_events']
+
+    cf_man = CloudTrailManager(module, region=region, **aws_connect_params)
+
+    results = { 'changed': False }
+    if module.params['state'] == 'enabled':
+        results['exists'] = cf_man.exists(name=ct_name)
+        if results['exists']:
+            results['view'] = cf_man.view(ct_name)
+            # only update if the values have changed.
+            if results['view']['S3BucketName'] != s3_bucket_name or \
+               results['view']['S3KeyPrefix'] != s3_key_prefix or \
+               results['view']['IncludeGlobalServiceEvents'] != include_global_events:
+                if not module.check_mode:
+                    results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
+                results['changed'] = True
+        else:
+            if not module.check_mode:
+                # doesn't exist. create it.
+                results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
+            results['changed'] = True
+
+        # given cloudtrail should exist now. Enable the logging.
+        results['view_status'] = cf_man.view_status(ct_name)
+        results['was_logging_enabled'] = results['view_status'].get('IsLogging', False)
+        if not results['was_logging_enabled']:
+            if not module.check_mode:
+                cf_man.enable_logging(ct_name)
+                results['logging_enabled'] = True
+            results['changed'] = True
+
+    # delete the cloudtrail
+    elif module.params['state'] == 'disabled':
+        # check to see if it exists before deleting.
+        results['exists'] = cf_man.exists(name=ct_name)
+        if results['exists']:
+            # it exists, so we should delete it and mark changed.
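+            # boto raises an exception on API errors here rather than returning
+            # an error payload, so a failed delete surfaces as a module failure.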
+ if not module.check_mode: + cf_man.delete(ct_name) + results['changed'] = True + + module.exit_json(**results) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() diff --git a/cloud/cloudstack/__init__.py b/cloud/cloudstack/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py new file mode 100644 index 00000000000..07b9cf42d6a --- /dev/null +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_affinitygroup +short_description: Manages affinity groups on Apache CloudStack based clouds. +description: + - Create and remove affinity groups. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of the affinity group. + required: true + affinty_type: + description: + - Type of the affinity group. If not specified, first found affinity type is used. + required: false + default: null + description: + description: + - Description of the affinity group. + required: false + default: null + state: + description: + - State of the affinity group. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +--- +# Create a affinity group +- local_action: + module: cs_affinitygroup + name: haproxy + affinty_type: host anti-affinity + + +# Remove a affinity group +- local_action: + module: cs_affinitygroup + name: haproxy + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of affinity group. + returned: success + type: string + sample: app +description: + description: Description of affinity group. + returned: success + type: string + sample: application affinity group +affinity_type: + description: Type of affinity group. 
+ returned: success + type: string + sample: host anti-affinity +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.affinity_group = None + + + def get_affinity_group(self): + if not self.affinity_group: + affinity_group_name = self.module.params.get('name') + + affinity_groups = self.cs.listAffinityGroups() + if affinity_groups: + for a in affinity_groups['affinitygroup']: + if a['name'] == affinity_group_name: + self.affinity_group = a + break + return self.affinity_group + + + def get_affinity_type(self): + affinity_type = self.module.params.get('affinty_type') + + affinity_types = self.cs.listAffinityGroupTypes() + if affinity_types: + if not affinity_type: + return affinity_types['affinityGroupType'][0]['type'] + + for a in affinity_types['affinityGroupType']: + if a['type'] == affinity_type: + return a['type'] + self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type) + + + def create_affinity_group(self): + affinity_group = self.get_affinity_group() + if not affinity_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['type'] = self.get_affinity_type() + args['description'] = self.module.params.get('description') + + if not self.module.check_mode: + res = self.cs.createAffinityGroup(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + affinity_group = self._poll_job(res, 'affinitygroup') + + return affinity_group + + + def remove_affinity_group(self): + affinity_group = self.get_affinity_group() + if affinity_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + + if not self.module.check_mode: + res = self.cs.deleteAffinityGroup(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'affinitygroup') + + return affinity_group + + + def get_result(self, affinity_group): + if affinity_group: + if 'name' in affinity_group: + self.result['name'] = affinity_group['name'] + if 'description' in affinity_group: + self.result['description'] = affinity_group['description'] + if 'type' in affinity_group: + self.result['affinity_type'] = affinity_group['type'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + affinty_type = dict(default=None), + description = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_ag = AnsibleCloudStackAffinityGroup(module) + + state = module.params.get('state') + if state in ['absent']: + affinity_group = acs_ag.remove_affinity_group() + else: + affinity_group = acs_ag.create_affinity_group() + 
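+        # get_result() copies only the documented return values (name,
+        # description, affinity_type) out of the API response.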
+ result = acs_ag.get_result(affinity_group) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py new file mode 100644 index 00000000000..13f114c1b35 --- /dev/null +++ b/cloud/cloudstack/cs_firewall.py @@ -0,0 +1,266 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +module: cs_firewall +short_description: Manages firewall rules on Apache CloudStack based clouds. +description: + - Creates and removes firewall rules. +version_added: '2.0' +author: René Moser +options: + ip_address: + description: + - Public IP address the rule is assigned to. + required: true + state: + description: + - State of the firewall rule. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + protocol: + description: + - Protocol of the firewall rule. + required: false + default: 'tcp' + choices: [ 'tcp', 'udp', 'icmp' ] + cidr: + description: + - CIDR (full notation) to be used for firewall rule. + required: false + default: '0.0.0.0/0' + start_port: + description: + - Start port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). + required: false + default: null + end_port: + description: + - End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). + required: false + default: null + icmp_type: + description: + - Type of the icmp message being sent. Considered if C(protocol=icmp). + required: false + default: null + icmp_code: + description: + - Error code for this icmp message. Considered if C(protocol=icmp). + required: false + default: null + project: + description: + - Name of the project. 
+ required: false + default: null +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +--- +# Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1 +- local_action: + module: cs_firewall + ip_address: 4.3.2.1 + start_port: 80 + end_port: 80 + cidr: 1.2.3.4/32 + + +# Allow inbound tcp/udp port 53 to 4.3.2.1 +- local_action: + module: cs_firewall + ip_address: 4.3.2.1 + start_port: 53 + end_port: 53 + protocol: '{{ item }}' + with_items: + - tcp + - udp + + +# Ensure firewall rule is removed +- local_action: + module: cs_firewall + ip_address: 4.3.2.1 + start_port: 8000 + end_port: 8888 + cidr: 17.0.0.0/8 + state: absent +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackFirewall(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.firewall_rule = None + + + def get_firewall_rule(self): + if not self.firewall_rule: + cidr = self.module.params.get('cidr') + protocol = self.module.params.get('protocol') + start_port = self.module.params.get('start_port') + end_port = self.module.params.get('end_port') + icmp_code = self.module.params.get('icmp_code') + icmp_type = self.module.params.get('icmp_type') + + if protocol in ['tcp', 'udp'] and not (start_port and end_port): + self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol) + + if protocol == 'icmp' and not icmp_type: + self.module.fail_json(msg="no icmp_type set") + + args = {} + args['ipaddressid'] = self.get_ip_address_id() + args['projectid'] = self.get_project_id() + + firewall_rules = self.cs.listFirewallRules(**args) + if firewall_rules and 'firewallrule' in firewall_rules: + for rule in firewall_rules['firewallrule']: + type_match = self._type_cidr_match(rule, cidr) + + protocol_match = self._tcp_udp_match(rule, protocol, start_port, end_port) \ + or self._icmp_match(rule, protocol, icmp_code, icmp_type) + + if type_match and protocol_match: + self.firewall_rule = rule + break + return self.firewall_rule + + + def _tcp_udp_match(self, rule, protocol, start_port, end_port): + return protocol in ['tcp', 'udp'] \ + and protocol == rule['protocol'] \ + and start_port == int(rule['startport']) \ + and end_port == int(rule['endport']) + + + def _icmp_match(self, rule, protocol, icmp_code, icmp_type): + return protocol == 'icmp' \ + and protocol == rule['protocol'] \ + and icmp_code == rule['icmpcode'] \ + and icmp_type == rule['icmptype'] + + + def _type_cidr_match(self, rule, cidr): + return cidr == rule['cidrlist'] + + + def create_firewall_rule(self): + firewall_rule = self.get_firewall_rule() + if not firewall_rule: + self.result['changed'] = True + args = {} + args['cidrlist'] = self.module.params.get('cidr') + args['protocol'] = self.module.params.get('protocol') + args['startport'] = self.module.params.get('start_port') + args['endport'] = self.module.params.get('end_port') + args['icmptype'] = self.module.params.get('icmp_type') + args['icmpcode'] = self.module.params.get('icmp_code') + args['ipaddressid'] = self.get_ip_address_id() + + if not self.module.check_mode: + firewall_rule = self.cs.createFirewallRule(**args) + + return firewall_rule + + + def remove_firewall_rule(self): + firewall_rule = self.get_firewall_rule() + if firewall_rule: + self.result['changed'] = True + args = {} + args['id'] = 
firewall_rule['id'] + + if not self.module.check_mode: + res = self.cs.deleteFirewallRule(**args) + + return firewall_rule + + + def get_result(self, firewall_rule): + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + ip_address = dict(required=True, default=None), + cidr = dict(default='0.0.0.0/0'), + protocol = dict(choices=['tcp', 'udp', 'icmp'], default='tcp'), + icmp_type = dict(type='int', default=None), + icmp_code = dict(type='int', default=None), + start_port = dict(type='int', default=None), + end_port = dict(type='int', default=None), + state = dict(choices=['present', 'absent'], default='present'), + project = dict(default=None), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + required_together = ( + ['start_port', 'end_port'], + ), + mutually_exclusive = ( + ['icmp_type', 'start_port'], + ['icmp_type', 'end_port'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_fw = AnsibleCloudStackFirewall(module) + + state = module.params.get('state') + if state in ['absent']: + fw_rule = acs_fw.remove_firewall_rule() + else: + fw_rule = acs_fw.create_firewall_rule() + + result = acs_fw.get_result(fw_rule) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py new file mode 100644 index 00000000000..8680f20ada5 --- /dev/null +++ b/cloud/cloudstack/cs_instance.py @@ -0,0 +1,788 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_instance +short_description: Manages instances and virtual machines on Apache CloudStack based clouds. +description: + - Deploy, start, restart, stop and destroy instances on Apache CloudStack, Citrix CloudPlatform and Exoscale. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Host name of the instance. C(name) can only contain ASCII letters. + required: true + display_name: + description: + - Custom display name of the instances. + required: false + default: null + group: + description: + - Group in where the new instance should be in. + required: false + default: null + state: + description: + - State of the instance. + required: false + default: 'present' + choices: [ 'deployed', 'started', 'stopped', 'restarted', 'destroyed', 'expunged', 'present', 'absent' ] + service_offering: + description: + - Name or id of the service offering of the new instance. If not set, first found service offering is used. 
+ required: false + default: null + template: + description: + - Name or id of the template to be used for creating the new instance. Required when using C(state=present). Mutually exclusive with C(ISO) option. + required: false + default: null + iso: + description: + - Name or id of the ISO to be used for creating the new instance. Required when using C(state=present). Mutually exclusive with C(template) option. + required: false + default: null + hypervisor: + description: + - Name the hypervisor to be used for creating the new instance. Relevant when using C(state=present) and option C(ISO) is used. If not set, first found hypervisor will be used. + required: false + default: null + choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] + keyboard: + description: + - Keyboard device type for the instance. + required: false + default: null + choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ] + networks: + description: + - List of networks to use for the new instance. + required: false + default: [] + aliases: [ 'network' ] + ip_address: + description: + - IPv4 address for default instance's network during creation + required: false + default: null + ip6_address: + description: + - IPv6 address for default instance's network. + required: false + default: null + disk_offering: + description: + - Name of the disk offering to be used. + required: false + default: null + disk_size: + description: + - Disk size in GByte required if deploying instance from ISO. + required: false + default: null + security_groups: + description: + - List of security groups the instance to be applied to. + required: false + default: [] + aliases: [ 'security_group' ] + project: + description: + - Name of the project the instance to be deployed in. + required: false + default: null + zone: + description: + - Name of the zone in which the instance shoud be deployed. If not set, default zone is used. + required: false + default: null + ssh_key: + description: + - Name of the SSH key to be deployed on the new instance. + required: false + default: null + affinity_groups: + description: + - Affinity groups names to be applied to the new instance. + required: false + default: [] + aliases: [ 'affinity_group' ] + user_data: + description: + - Optional data (ASCII) that can be sent to the instance upon a successful deployment. + - The data will be automatically base64 encoded. + - Consider switching to HTTP_POST by using C(CLOUDSTACK_METHOD=post) to increase the HTTP_GET size limit of 2KB to 32 KB. + required: false + default: null + force: + description: + - Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed. + required: false + default: true + tags: + description: + - List of tags. Tags are a list of dictionaries having keys C(key) and C(value). + - "If you want to delete all tags, set a empty list e.g. C(tags: [])." + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +--- +# Create a instance on CloudStack from an ISO +# NOTE: Names of offerings and ISOs depending on the CloudStack configuration. 
+- local_action: + module: cs_instance + name: web-vm-1 + iso: Linux Debian 7 64-bit + hypervisor: VMware + project: Integration + zone: ch-zrh-ix-01 + service_offering: 1cpu_1gb + disk_offering: PerfPlus Storage + disk_size: 20 + networks: + - Server Integration + - Sync Integration + - Storage Integration + + +# For changing a running instance, use the 'force' parameter +- local_action: + module: cs_instance + name: web-vm-1 + display_name: web-vm-01.example.com + iso: Linux Debian 7 64-bit + service_offering: 2cpu_2gb + force: yes + + +# Create or update a instance on Exoscale's public cloud +- local_action: + module: cs_instance + name: web-vm-1 + template: Linux Debian 7 64-bit + service_offering: Tiny + ssh_key: john@example.com + tags: + - { key: admin, value: john } + - { key: foo, value: bar } + register: vm + +- debug: msg='default ip {{ vm.default_ip }} and is in state {{ vm.state }}' + + +# Ensure a instance has stopped +- local_action: cs_instance name=web-vm-1 state=stopped + + +# Ensure a instance is running +- local_action: cs_instance name=web-vm-1 state=started + + +# Remove a instance +- local_action: cs_instance name=web-vm-1 state=absent +''' + +RETURN = ''' +--- +id: + description: ID of the instance. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the instance. + returned: success + type: string + sample: web-01 +display_name: + description: Display name of the instance. + returned: success + type: string + sample: web-01 +group: + description: Group name of the instance is related. + returned: success + type: string + sample: web +created: + description: Date of the instance was created. + returned: success + type: string + sample: 2014-12-01T14:57:57+0100 +password_enabled: + description: True if password setting is enabled. + returned: success + type: boolean + sample: true +password: + description: The password of the instance if exists. + returned: success + type: string + sample: Ge2oe7Do +ssh_key: + description: Name of ssh key deployed to instance. + returned: success + type: string + sample: key@work +project: + description: Name of project the instance is related to. + returned: success + type: string + sample: Production +default_ip: + description: Default IP address of the instance. + returned: success + type: string + sample: 10.23.37.42 +public_ip: + description: Public IP address with instance via static nat rule. + returned: success + type: string + sample: 1.2.3.4 +iso: + description: Name of ISO the instance was deployed with. + returned: success + type: string + sample: Debian-8-64bit +template: + description: Name of template the instance was deployed with. + returned: success + type: string + sample: Debian-8-64bit +service_offering: + description: Name of the service offering the instance has. + returned: success + type: string + sample: 2cpu_2gb +zone: + description: Name of zone the instance is in. + returned: success + type: string + sample: ch-gva-2 +state: + description: State of the instance. + returned: success + type: string + sample: Running +security_groups: + description: Security groups the instance is in. + returned: success + type: list + sample: '[ "default" ]' +affinity_groups: + description: Affinity groups the instance is in. + returned: success + type: list + sample: '[ "webservers" ]' +tags: + description: List of resource tags associated with the instance. 
+ returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +''' + +import base64 + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackInstance(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.instance = None + + + def get_service_offering_id(self): + service_offering = self.module.params.get('service_offering') + + service_offerings = self.cs.listServiceOfferings() + if service_offerings: + if not service_offering: + return service_offerings['serviceoffering'][0]['id'] + + for s in service_offerings['serviceoffering']: + if service_offering in [ s['name'], s['id'] ]: + return s['id'] + self.module.fail_json(msg="Service offering '%s' not found" % service_offering) + + + def get_template_or_iso_id(self): + template = self.module.params.get('template') + iso = self.module.params.get('iso') + + if not template and not iso: + self.module.fail_json(msg="Template or ISO is required.") + + if template and iso: + self.module.fail_json(msg="Template are ISO are mutually exclusive.") + + if template: + templates = self.cs.listTemplates(templatefilter='executable') + if templates: + for t in templates['template']: + if template in [ t['displaytext'], t['name'], t['id'] ]: + return t['id'] + self.module.fail_json(msg="Template '%s' not found" % template) + + elif iso: + isos = self.cs.listIsos() + if isos: + for i in isos['iso']: + if iso in [ i['displaytext'], i['name'], i['id'] ]: + return i['id'] + self.module.fail_json(msg="ISO '%s' not found" % iso) + + + def get_disk_offering_id(self): + disk_offering = self.module.params.get('disk_offering') + + if not disk_offering: + return None + + disk_offerings = self.cs.listDiskOfferings() + if disk_offerings: + for d in disk_offerings['diskoffering']: + if disk_offering in [ d['displaytext'], d['name'], d['id'] ]: + return d['id'] + self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering) + + + def get_instance(self): + instance = self.instance + if not instance: + instance_name = self.module.params.get('name') + + args = {} + args['projectid'] = self.get_project_id() + args['zoneid'] = self.get_zone_id() + instances = self.cs.listVirtualMachines(**args) + if instances: + for v in instances['virtualmachine']: + if instance_name in [ v['name'], v['displayname'], v['id'] ]: + self.instance = v + break + return self.instance + + + def get_network_ids(self): + network_names = self.module.params.get('networks') + if not network_names: + return None + + args = {} + args['zoneid'] = self.get_zone_id() + args['projectid'] = self.get_project_id() + networks = self.cs.listNetworks(**args) + if not networks: + self.module.fail_json(msg="No networks available") + + network_ids = [] + network_displaytexts = [] + for network_name in network_names: + for n in networks['network']: + if network_name in [ n['displaytext'], n['name'], n['id'] ]: + network_ids.append(n['id']) + network_displaytexts.append(n['name']) + break + + if len(network_ids) != len(network_names): + self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts) + + return ','.join(network_ids) + + + def present_instance(self): + instance = self.get_instance() + if not instance: + instance = self.deploy_instance() + else: + instance = self.update_instance(instance) + + instance = 
self.ensure_tags(resource=instance, resource_type='UserVm') + + return instance + + + def get_user_data(self): + user_data = self.module.params.get('user_data') + if user_data: + user_data = base64.b64encode(user_data) + return user_data + + + def get_display_name(self): + display_name = self.module.params.get('display_name') + if not display_name: + display_name = self.module.params.get('name') + return display_name + + + def deploy_instance(self): + self.result['changed'] = True + + args = {} + args['templateid'] = self.get_template_or_iso_id() + args['zoneid'] = self.get_zone_id() + args['serviceofferingid'] = self.get_service_offering_id() + args['projectid'] = self.get_project_id() + args['diskofferingid'] = self.get_disk_offering_id() + args['networkids'] = self.get_network_ids() + args['hypervisor'] = self.get_hypervisor() + args['userdata'] = self.get_user_data() + args['keyboard'] = self.module.params.get('keyboard') + args['ipaddress'] = self.module.params.get('ip_address') + args['ip6address'] = self.module.params.get('ip6_address') + args['name'] = self.module.params.get('name') + args['group'] = self.module.params.get('group') + args['keypair'] = self.module.params.get('ssh_key') + args['size'] = self.module.params.get('disk_size') + args['securitygroupnames'] = ','.join(self.module.params.get('security_groups')) + args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups')) + + instance = None + if not self.module.check_mode: + instance = self.cs.deployVirtualMachine(**args) + + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self._poll_job(instance, 'virtualmachine') + return instance + + + def update_instance(self, instance): + args_service_offering = {} + args_service_offering['id'] = instance['id'] + args_service_offering['serviceofferingid'] = self.get_service_offering_id() + + args_instance_update = {} + args_instance_update['id'] = instance['id'] + args_instance_update['group'] = self.module.params.get('group') + args_instance_update['displayname'] = self.get_display_name() + args_instance_update['userdata'] = self.get_user_data() + args_instance_update['ostypeid'] = self.get_os_type_id() + + args_ssh_key = {} + args_ssh_key['id'] = instance['id'] + args_ssh_key['keypair'] = self.module.params.get('ssh_key') + args_ssh_key['projectid'] = self.get_project_id() + + if self._has_changed(args_service_offering, instance) or \ + self._has_changed(args_instance_update, instance) or \ + self._has_changed(args_ssh_key, instance): + + force = self.module.params.get('force') + instance_state = instance['state'].lower() + + if instance_state == 'stopped' or force: + self.result['changed'] = True + if not self.module.check_mode: + + # Ensure VM has stopped + instance = self.stop_instance() + instance = self._poll_job(instance, 'virtualmachine') + self.instance = instance + + # Change service offering + if self._has_changed(args_service_offering, instance): + res = self.cs.changeServiceForVirtualMachine(**args_service_offering) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + instance = res['virtualmachine'] + self.instance = instance + + # Update VM + if self._has_changed(args_instance_update, instance): + res = self.cs.updateVirtualMachine(**args_instance_update) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + instance = res['virtualmachine'] + 
self.instance = instance
+
+                    # Reset SSH key
+                    if self._has_changed(args_ssh_key, instance):
+                        instance = self.cs.resetSSHKeyForVirtualMachine(**args_ssh_key)
+                        if 'errortext' in instance:
+                            self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+                        instance = self._poll_job(instance, 'virtualmachine')
+                        self.instance = instance
+
+                    # Start VM again if it was running before
+                    if instance_state == 'running':
+                        instance = self.start_instance()
+        return instance
+
+
+    def absent_instance(self):
+        instance = self.get_instance()
+        if instance:
+            if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']:
+                self.result['changed'] = True
+                if not self.module.check_mode:
+                    res = self.cs.destroyVirtualMachine(id=instance['id'])
+
+                    if 'errortext' in res:
+                        self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+                    poll_async = self.module.params.get('poll_async')
+                    if poll_async:
+                        instance = self._poll_job(res, 'virtualmachine')
+        return instance
+
+
+    def expunge_instance(self):
+        instance = self.get_instance()
+        if instance:
+            res = {}
+            if instance['state'].lower() in [ 'destroying', 'destroyed' ]:
+                self.result['changed'] = True
+                if not self.module.check_mode:
+                    res = self.cs.expungeVirtualMachine(id=instance['id'])
+
+            elif instance['state'].lower() not in [ 'expunging' ]:
+                self.result['changed'] = True
+                if not self.module.check_mode:
+                    res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True)
+
+            if res and 'errortext' in res:
+                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+            poll_async = self.module.params.get('poll_async')
+            if poll_async:
+                instance = self._poll_job(res, 'virtualmachine')
+        return instance
+
+
+    def stop_instance(self):
+        instance = self.get_instance()
+        if not instance:
+            self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name'))
+
+        if instance['state'].lower() in ['stopping', 'stopped']:
+            return instance
+
+        if instance['state'].lower() in ['starting', 'running']:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                instance = self.cs.stopVirtualMachine(id=instance['id'])
+
+                if 'errortext' in instance:
+                    self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    instance = self._poll_job(instance, 'virtualmachine')
+        return instance
+
+
+    def start_instance(self):
+        instance = self.get_instance()
+        if not instance:
+            self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name'))
+
+        if instance['state'].lower() in ['starting', 'running']:
+            return instance
+
+        if instance['state'].lower() in ['stopped', 'stopping']:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                instance = self.cs.startVirtualMachine(id=instance['id'])
+
+                if 'errortext' in instance:
+                    self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    instance = self._poll_job(instance, 'virtualmachine')
+        return instance
+
+
+    def restart_instance(self):
+        instance = self.get_instance()
+        if not instance:
+            self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name'))
+
+        if instance['state'].lower() in [ 'running', 'starting' ]:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                instance = self.cs.rebootVirtualMachine(id=instance['id'])
+
+                if 'errortext' in instance:
+                    self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
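+                # rebootVirtualMachine is an async job; block on it unless polling was disabled.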
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    instance = self._poll_job(instance, 'virtualmachine')
+
+        elif instance['state'].lower() in [ 'stopping', 'stopped' ]:
+            instance = self.start_instance()
+        return instance
+
+
+    def get_result(self, instance):
+        if instance:
+            if 'id' in instance:
+                self.result['id'] = instance['id']
+            if 'name' in instance:
+                self.result['name'] = instance['name']
+            if 'displayname' in instance:
+                self.result['display_name'] = instance['displayname']
+            if 'group' in instance:
+                self.result['group'] = instance['group']
+            if 'project' in instance:
+                self.result['project'] = instance['project']
+            if 'publicip' in instance:
+                self.result['public_ip'] = instance['publicip']
+            if 'passwordenabled' in instance:
+                self.result['password_enabled'] = instance['passwordenabled']
+            if 'password' in instance:
+                self.result['password'] = instance['password']
+            if 'serviceofferingname' in instance:
+                self.result['service_offering'] = instance['serviceofferingname']
+            if 'zonename' in instance:
+                self.result['zone'] = instance['zonename']
+            if 'templatename' in instance:
+                self.result['template'] = instance['templatename']
+            if 'isoname' in instance:
+                self.result['iso'] = instance['isoname']
+            if 'keypair' in instance:
+                self.result['ssh_key'] = instance['keypair']
+            if 'created' in instance:
+                self.result['created'] = instance['created']
+            if 'state' in instance:
+                self.result['state'] = instance['state']
+            if 'tags' in instance:
+                self.result['tags'] = []
+                for tag in instance['tags']:
+                    result_tag = {}
+                    result_tag['key'] = tag['key']
+                    result_tag['value'] = tag['value']
+                    self.result['tags'].append(result_tag)
+            if 'securitygroup' in instance:
+                security_groups = []
+                for securitygroup in instance['securitygroup']:
+                    security_groups.append(securitygroup['name'])
+                self.result['security_groups'] = security_groups
+            if 'affinitygroup' in instance:
+                affinity_groups = []
+                for affinitygroup in instance['affinitygroup']:
+                    affinity_groups.append(affinitygroup['name'])
+                self.result['affinity_groups'] = affinity_groups
+            if 'nic' in instance:
+                for nic in instance['nic']:
+                    if nic['isdefault']:
+                        self.result['default_ip'] = nic['ipaddress']
+        return self.result
+
+def main():
+    module = AnsibleModule(
+        argument_spec = dict(
+            name = dict(required=True),
+            display_name = dict(default=None),
+            group = dict(default=None),
+            state = dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'absent', 'destroyed', 'expunged'], default='present'),
+            service_offering = dict(default=None),
+            template = dict(default=None),
+            iso = dict(default=None),
+            networks = dict(type='list', aliases=[ 'network' ], default=None),
+            ip_address = dict(default=None),
+            ip6_address = dict(default=None),
+            disk_offering = dict(default=None),
+            disk_size = dict(type='int', default=None),
+            keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None),
+            hypervisor = dict(default=None),
+            security_groups = dict(type='list', aliases=[ 'security_group' ], default=[]),
+            affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]),
+            project = dict(default=None),
+            user_data = dict(default=None),
+            zone = dict(default=None),
+            ssh_key = dict(default=None),
+            force = dict(choices=BOOLEANS, default=False),
+            tags = dict(type='list', aliases=[ 'tag' ], default=None),
+            poll_async = dict(choices=BOOLEANS, default=True),
+            api_key = dict(default=None),
+            api_secret = dict(default=None),
+
api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_instance = AnsibleCloudStackInstance(module) + + state = module.params.get('state') + + if state in ['absent', 'destroyed']: + instance = acs_instance.absent_instance() + + elif state in ['expunged']: + instance = acs_instance.expunge_instance() + + elif state in ['present', 'deployed']: + instance = acs_instance.present_instance() + + elif state in ['stopped']: + instance = acs_instance.stop_instance() + + elif state in ['started']: + instance = acs_instance.start_instance() + + elif state in ['restarted']: + instance = acs_instance.restart_instance() + + if instance and 'state' in instance and instance['state'].lower() == 'error': + module.fail_json(msg="Instance named '%s' in error state." % module.params.get('name')) + + result = acs_instance.get_result(instance) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py new file mode 100644 index 00000000000..83af1e1783e --- /dev/null +++ b/cloud/cloudstack/cs_iso.py @@ -0,0 +1,324 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_iso +short_description: Manages ISOs images on Apache CloudStack based clouds. +description: + - Register and remove ISO images. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of the ISO. + required: true + url: + description: + - URL where the ISO can be downloaded from. Required if C(state) is present. + required: false + default: null + os_type: + description: + - Name of the OS that best represents the OS of this ISO. If the iso is bootable this parameter needs to be passed. Required if C(state) is present. + required: false + default: null + is_ready: + description: + - This flag is used for searching existing ISOs. If set to C(true), it will only list ISO ready for deployment e.g. successfully downloaded and installed. Recommended to set it to C(false). + required: false + default: false + aliases: [] + is_public: + description: + - Register the ISO to be publicly available to all users. Only used if C(state) is present. + required: false + default: false + is_featured: + description: + - Register the ISO to be featured. Only used if C(state) is present. + required: false + default: false + is_dynamically_scalable: + description: + - Register the ISO having XS/VMWare tools installed inorder to support dynamic scaling of VM cpu/memory. Only used if C(state) is present. 
+ required: false + default: false + aliases: [] + checksum: + description: + - The MD5 checksum value of this ISO. If set, we search by checksum instead of name. + required: false + default: false + bootable: + description: + - Register the ISO to be bootable. Only used if C(state) is present. + required: false + default: true + project: + description: + - Name of the project the ISO to be registered in. + required: false + default: null + zone: + description: + - Name of the zone you wish the ISO to be registered or deleted from. If not specified, first zone found will be used. + required: false + default: null + iso_filter: + description: + - Name of the filter used to search for the ISO. + required: false + default: 'self' + choices: [ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ] + state: + description: + - State of the ISO. + required: false + default: 'present' + choices: [ 'present', 'absent' ] +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +--- +# Register an ISO if ISO name does not already exist. +- local_action: + module: cs_iso + name: Debian 7 64-bit + url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso + os_type: Debian GNU/Linux 7(64-bit) + + +# Register an ISO with given name if ISO md5 checksum does not already exist. +- local_action: + module: cs_iso + name: Debian 7 64-bit + url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso + os_type: + checksum: 0b31bccccb048d20b551f70830bb7ad0 + + +# Remove an ISO by name +- local_action: + module: cs_iso + name: Debian 7 64-bit + state: absent + + +# Remove an ISO by checksum +- local_action: + module: cs_iso + name: Debian 7 64-bit + checksum: 0b31bccccb048d20b551f70830bb7ad0 + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the ISO. + returned: success + type: string + sample: Debian 7 64-bit +displaytext: + description: Text to be displayed of the ISO. + returned: success + type: string + sample: Debian 7.7 64-bit minimal 2015-03-19 +zone: + description: Name of zone the ISO is registered in. + returned: success + type: string + sample: zuerich +status: + description: Status of the ISO. + returned: success + type: string + sample: Successfully Installed +is_ready: + description: True if the ISO is ready to be deployed from. + returned: success + type: boolean + sample: true +checksum: + description: MD5 checksum of the ISO. + returned: success + type: string + sample: 0b31bccccb048d20b551f70830bb7ad0 +created: + description: Date of registering. 
+    returned: success
+    type: string
+    sample: 2015-03-29T14:57:06+0200
+'''
+
+try:
+    from cs import CloudStack, CloudStackException, read_config
+    has_lib_cs = True
+except ImportError:
+    has_lib_cs = False
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackIso(AnsibleCloudStack):
+
+    def __init__(self, module):
+        AnsibleCloudStack.__init__(self, module)
+        self.result = {
+            'changed': False,
+        }
+        self.iso = None
+
+    def register_iso(self):
+        iso = self.get_iso()
+        if not iso:
+            args = {}
+            args['zoneid'] = self.get_zone_id()
+            args['projectid'] = self.get_project_id()
+
+            args['bootable'] = self.module.params.get('bootable')
+            args['ostypeid'] = self.get_os_type_id()
+            if args['bootable'] and not args['ostypeid']:
+                self.module.fail_json(msg="OS type 'os_type' is required if 'bootable=true'.")
+
+            args['url'] = self.module.params.get('url')
+            if not args['url']:
+                self.module.fail_json(msg="URL is required.")
+
+            args['name'] = self.module.params.get('name')
+            args['displaytext'] = self.module.params.get('name')
+            args['checksum'] = self.module.params.get('checksum')
+            args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable')
+            args['isfeatured'] = self.module.params.get('is_featured')
+            args['ispublic'] = self.module.params.get('is_public')
+
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                res = self.cs.registerIso(**args)
+                iso = res['iso'][0]
+        return iso
+
+
+    def get_iso(self):
+        if not self.iso:
+            args = {}
+            args['isready'] = self.module.params.get('is_ready')
+            args['isofilter'] = self.module.params.get('iso_filter')
+            args['projectid'] = self.get_project_id()
+            args['zoneid'] = self.get_zone_id()
+
+            # if checksum is set, we only look at that.
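+            # matching on the checksum keeps the task idempotent even when the
+            # same image was registered under a different name.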
+ checksum = self.module.params.get('checksum') + if not checksum: + args['name'] = self.module.params.get('name') + + isos = self.cs.listIsos(**args) + if isos: + if not checksum: + self.iso = isos['iso'][0] + else: + for i in isos['iso']: + if i['checksum'] == checksum: + self.iso = i + break + return self.iso + + + def remove_iso(self): + iso = self.get_iso() + if iso: + self.result['changed'] = True + args = {} + args['id'] = iso['id'] + args['projectid'] = self.get_project_id() + args['zoneid'] = self.get_zone_id() + if not self.module.check_mode: + res = self.cs.deleteIso(**args) + return iso + + + def get_result(self, iso): + if iso: + if 'displaytext' in iso: + self.result['displaytext'] = iso['displaytext'] + if 'name' in iso: + self.result['name'] = iso['name'] + if 'zonename' in iso: + self.result['zone'] = iso['zonename'] + if 'checksum' in iso: + self.result['checksum'] = iso['checksum'] + if 'status' in iso: + self.result['status'] = iso['status'] + if 'isready' in iso: + self.result['is_ready'] = iso['isready'] + if 'created' in iso: + self.result['created'] = iso['created'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + url = dict(default=None), + os_type = dict(default=None), + zone = dict(default=None), + iso_filter = dict(default='self', choices=[ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]), + project = dict(default=None), + checksum = dict(default=None), + is_ready = dict(choices=BOOLEANS, default=False), + bootable = dict(choices=BOOLEANS, default=True), + is_featured = dict(choices=BOOLEANS, default=False), + is_dynamically_scalable = dict(choices=BOOLEANS, default=False), + state = dict(choices=['present', 'absent'], default='present'), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_iso = AnsibleCloudStackIso(module) + + state = module.params.get('state') + if state in ['absent']: + iso = acs_iso.remove_iso() + else: + iso = acs_iso.register_iso() + + result = acs_iso.get_result(iso) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py new file mode 100644 index 00000000000..50556da5bb3 --- /dev/null +++ b/cloud/cloudstack/cs_securitygroup.py @@ -0,0 +1,198 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: cs_securitygroup +short_description: Manages security groups on Apache CloudStack based clouds. +description: + - Create and remove security groups. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of the security group. + required: true + description: + description: + - Description of the security group. + required: false + default: null + state: + description: + - State of the security group. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + project: + description: + - Name of the project the security group to be created in. + required: false + default: null +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +--- +# Create a security group +- local_action: + module: cs_securitygroup + name: default + description: default security group + + +# Remove a security group +- local_action: + module: cs_securitygroup + name: default + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of security group. + returned: success + type: string + sample: app +description: + description: Description of security group. + returned: success + type: string + sample: application security group +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackSecurityGroup(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.security_group = None + + + def get_security_group(self): + if not self.security_group: + sg_name = self.module.params.get('name') + args = {} + args['projectid'] = self.get_project_id() + sgs = self.cs.listSecurityGroups(**args) + if sgs: + for s in sgs['securitygroup']: + if s['name'] == sg_name: + self.security_group = s + break + return self.security_group + + + def create_security_group(self): + security_group = self.get_security_group() + if not security_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['projectid'] = self.get_project_id() + args['description'] = self.module.params.get('description') + + if not self.module.check_mode: + res = self.cs.createSecurityGroup(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + security_group = res['securitygroup'] + + return security_group + + + def remove_security_group(self): + security_group = self.get_security_group() + if security_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['projectid'] = self.get_project_id() + + if not self.module.check_mode: + res = self.cs.deleteSecurityGroup(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + return security_group + + + def get_result(self, security_group): + if security_group: + if 'name' in security_group: + self.result['name'] = security_group['name'] + if 'description' in security_group: + self.result['description'] = security_group['description'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + description = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + project = dict(default=None), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = 
dict(default=None),
+            api_http_method = dict(default='get'),
+        ),
+        supports_check_mode=True
+    )
+
+    if not has_lib_cs:
+        module.fail_json(msg="python library cs required: pip install cs")
+
+    try:
+        acs_sg = AnsibleCloudStackSecurityGroup(module)
+
+        state = module.params.get('state')
+        if state in ['absent']:
+            sg = acs_sg.remove_security_group()
+        else:
+            sg = acs_sg.create_security_group()
+
+        result = acs_sg.get_result(sg)
+
+    except CloudStackException, e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py
new file mode 100644
index 00000000000..1f2dac6f267
--- /dev/null
+++ b/cloud/cloudstack/cs_securitygroup_rule.py
@@ -0,0 +1,439 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+DOCUMENTATION = '''
+---
+module: cs_securitygroup_rule
+short_description: Manages security group rules on Apache CloudStack based clouds.
+description:
+    - Add and remove security group rules.
+version_added: '2.0'
+author: René Moser
+options:
+  security_group:
+    description:
+      - Name of the security group the rule is related to. The security group must already exist.
+    required: true
+  state:
+    description:
+      - State of the security group rule.
+    required: false
+    default: 'present'
+    choices: [ 'present', 'absent' ]
+  protocol:
+    description:
+      - Protocol of the security group rule.
+    required: false
+    default: 'tcp'
+    choices: [ 'tcp', 'udp', 'icmp', 'ah', 'esp', 'gre' ]
+  type:
+    description:
+      - Ingress or egress security group rule.
+    required: false
+    default: 'ingress'
+    choices: [ 'ingress', 'egress' ]
+  cidr:
+    description:
+      - CIDR (full notation) to be used for security group rule.
+    required: false
+    default: '0.0.0.0/0'
+  user_security_group:
+    description:
+      - Security group this rule is based on.
+    required: false
+    default: null
+  start_port:
+    description:
+      - Start port for this rule. Required if C(protocol=tcp) or C(protocol=udp).
+    required: false
+    default: null
+    aliases: [ 'port' ]
+  end_port:
+    description:
+      - End port for this rule. Required if C(protocol=tcp) or C(protocol=udp), but C(start_port) will be used if not set.
+    required: false
+    default: null
+  icmp_type:
+    description:
+      - Type of the icmp message being sent. Required if C(protocol=icmp).
+    required: false
+    default: null
+  icmp_code:
+    description:
+      - Error code for this icmp message. Required if C(protocol=icmp).
+    required: false
+    default: null
+  project:
+    description:
+      - Name of the project the security group belongs to.
+    required: false
+    default: null
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+ required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +--- +# Allow inbound port 80/tcp from 1.2.3.4 added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + port: 80 + cidr: 1.2.3.4/32 + + +# Allow tcp/udp outbound added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + type: egress + start_port: 1 + end_port: 65535 + protocol: '{{ item }}' + with_items: + - tcp + - udp + + +# Allow inbound icmp from 0.0.0.0/0 added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + protocol: icmp + icmp_code: -1 + icmp_type: -1 + + +# Remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + port: 80 + state: absent + + +# Allow inbound port 80/tcp from security group web added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + port: 80 + user_security_group: web +''' + +RETURN = ''' +--- +security_group: + description: security group of the rule. + returned: success + type: string + sample: default +type: + description: type of the rule. + returned: success + type: string + sample: ingress +cidr: + description: CIDR of the rule. + returned: success and cidr is defined + type: string + sample: 0.0.0.0/0 +user_security_group: + description: user security group of the rule. + returned: success and user_security_group is defined + type: string + sample: default +protocol: + description: protocol of the rule. + returned: success + type: string + sample: tcp +start_port: + description: start port of the rule. + returned: success + type: int + sample: 80 +end_port: + description: end port of the rule. 
+ returned: success + type: int + sample: 80 +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + + + def _tcp_udp_match(self, rule, protocol, start_port, end_port): + return protocol in ['tcp', 'udp'] \ + and protocol == rule['protocol'] \ + and start_port == int(rule['startport']) \ + and end_port == int(rule['endport']) + + + def _icmp_match(self, rule, protocol, icmp_code, icmp_type): + return protocol == 'icmp' \ + and protocol == rule['protocol'] \ + and icmp_code == int(rule['icmpcode']) \ + and icmp_type == int(rule['icmptype']) + + + def _ah_esp_gre_match(self, rule, protocol): + return protocol in ['ah', 'esp', 'gre'] \ + and protocol == rule['protocol'] + + + def _type_security_group_match(self, rule, security_group_name): + return security_group_name \ + and 'securitygroupname' in rule \ + and security_group_name == rule['securitygroupname'] + + + def _type_cidr_match(self, rule, cidr): + return 'cidr' in rule \ + and cidr == rule['cidr'] + + + def _get_rule(self, rules): + user_security_group_name = self.module.params.get('user_security_group') + cidr = self.module.params.get('cidr') + protocol = self.module.params.get('protocol') + start_port = self.module.params.get('start_port') + end_port = self.module.params.get('end_port') + icmp_code = self.module.params.get('icmp_code') + icmp_type = self.module.params.get('icmp_type') + + if not end_port: + end_port = start_port + + if protocol in ['tcp', 'udp'] and not (start_port and end_port): + self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol) + + if protocol == 'icmp' and not (icmp_type and icmp_code): + self.module.fail_json(msg="no icmp_type or icmp_code set for protocol '%s'" % protocol) + + for rule in rules: + if user_security_group_name: + type_match = self._type_security_group_match(rule, user_security_group_name) + else: + type_match = self._type_cidr_match(rule, cidr) + + protocol_match = ( self._tcp_udp_match(rule, protocol, start_port, end_port) \ + or self._icmp_match(rule, protocol, icmp_code, icmp_type) \ + or self._ah_esp_gre_match(rule, protocol) + ) + + if type_match and protocol_match: + return rule + return None + + + def get_security_group(self, security_group_name=None): + if not security_group_name: + security_group_name = self.module.params.get('security_group') + args = {} + args['securitygroupname'] = security_group_name + args['projectid'] = self.get_project_id() + sgs = self.cs.listSecurityGroups(**args) + if not sgs or 'securitygroup' not in sgs: + self.module.fail_json(msg="security group '%s' not found" % security_group_name) + return sgs['securitygroup'][0] + + + def add_rule(self): + security_group = self.get_security_group() + + args = {} + user_security_group_name = self.module.params.get('user_security_group') + + # the user_security_group and cidr are mutually_exclusive, but cidr is defaulted to 0.0.0.0/0. + # that is why we ignore if we have a user_security_group. 
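+        # In other words: the 0.0.0.0/0 cidr default is ignored as soon as
+        # user_security_group is set.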
+ if user_security_group_name: + args['usersecuritygrouplist'] = [] + user_security_group = self.get_security_group(user_security_group_name) + args['usersecuritygrouplist'].append({ + 'group': user_security_group['name'], + 'account': user_security_group['account'], + }) + else: + args['cidrlist'] = self.module.params.get('cidr') + + args['protocol'] = self.module.params.get('protocol') + args['startport'] = self.module.params.get('start_port') + args['endport'] = self.module.params.get('end_port') + args['icmptype'] = self.module.params.get('icmp_type') + args['icmpcode'] = self.module.params.get('icmp_code') + args['projectid'] = self.get_project_id() + args['securitygroupid'] = security_group['id'] + + if not args['endport']: + args['endport'] = args['startport'] + + rule = None + res = None + type = self.module.params.get('type') + if type == 'ingress': + rule = self._get_rule(security_group['ingressrule']) + if not rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.authorizeSecurityGroupIngress(**args) + + elif type == 'egress': + rule = self._get_rule(security_group['egressrule']) + if not rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.authorizeSecurityGroupEgress(**args) + + if res and 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + security_group = self._poll_job(res, 'securitygroup') + return security_group + + + def remove_rule(self): + security_group = self.get_security_group() + rule = None + res = None + type = self.module.params.get('type') + if type == 'ingress': + rule = self._get_rule(security_group['ingressrule']) + if rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.revokeSecurityGroupIngress(id=rule['ruleid']) + + elif type == 'egress': + rule = self._get_rule(security_group['egressrule']) + if rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.revokeSecurityGroupEgress(id=rule['ruleid']) + + if res and 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'securitygroup') + return security_group + + + def get_result(self, security_group_rule): + type = self.module.params.get('type') + + key = 'ingressrule' + if type == 'egress': + key = 'egressrule' + + self.result['type'] = type + self.result['security_group'] = self.module.params.get('security_group') + + if key in security_group_rule and security_group_rule[key]: + if 'securitygroupname' in security_group_rule[key][0]: + self.result['user_security_group'] = security_group_rule[key][0]['securitygroupname'] + if 'cidr' in security_group_rule[key][0]: + self.result['cidr'] = security_group_rule[key][0]['cidr'] + if 'protocol' in security_group_rule[key][0]: + self.result['protocol'] = security_group_rule[key][0]['protocol'] + if 'startport' in security_group_rule[key][0]: + self.result['start_port'] = security_group_rule[key][0]['startport'] + if 'endport' in security_group_rule[key][0]: + self.result['end_port'] = security_group_rule[key][0]['endport'] + if 'icmpcode' in security_group_rule[key][0]: + self.result['icmp_code'] = security_group_rule[key][0]['icmpcode'] + if 'icmptype' in security_group_rule[key][0]: + self.result['icmp_type'] = security_group_rule[key][0]['icmptype'] + return self.result + + +def main(): + 
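+    # The mutually_exclusive pairs below mirror the protocol checks in
+    # _get_rule(): a rule is either port-based (tcp/udp) or icmp-based,
+    # never both.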
module = AnsibleModule(
+        argument_spec = dict(
+            security_group = dict(required=True),
+            type = dict(choices=['ingress', 'egress'], default='ingress'),
+            cidr = dict(default='0.0.0.0/0'),
+            user_security_group = dict(default=None),
+            protocol = dict(choices=['tcp', 'udp', 'icmp', 'ah', 'esp', 'gre'], default='tcp'),
+            icmp_type = dict(type='int', default=None),
+            icmp_code = dict(type='int', default=None),
+            start_port = dict(type='int', default=None, aliases=['port']),
+            end_port = dict(type='int', default=None),
+            state = dict(choices=['present', 'absent'], default='present'),
+            project = dict(default=None),
+            poll_async = dict(choices=BOOLEANS, default=True),
+            api_key = dict(default=None),
+            api_secret = dict(default=None),
+            api_url = dict(default=None),
+            api_http_method = dict(default='get'),
+        ),
+        mutually_exclusive = (
+            ['icmp_type', 'start_port'],
+            ['icmp_type', 'end_port'],
+            ['icmp_code', 'start_port'],
+            ['icmp_code', 'end_port'],
+        ),
+        supports_check_mode=True
+    )
+
+    if not has_lib_cs:
+        module.fail_json(msg="python library cs required: pip install cs")
+
+    try:
+        acs_sg_rule = AnsibleCloudStackSecurityGroupRule(module)
+
+        state = module.params.get('state')
+        if state in ['absent']:
+            sg_rule = acs_sg_rule.remove_rule()
+        else:
+            sg_rule = acs_sg_rule.add_rule()
+
+        result = acs_sg_rule.get_result(sg_rule)
+
+    except CloudStackException, e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py
new file mode 100644
index 00000000000..8dd02dcd1f1
--- /dev/null
+++ b/cloud/cloudstack/cs_sshkeypair.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+DOCUMENTATION = '''
+---
+module: cs_sshkeypair
+short_description: Manages SSH keys on Apache CloudStack based clouds.
+description:
+    - Create, register and remove SSH keys. If no public key is provided,
+      a new SSH private/public key pair will be created and the private key
+      will be returned.
+version_added: '2.0'
+author: René Moser
+options:
+  name:
+    description:
+      - Name of public key.
+    required: true
+  project:
+    description:
+      - Name of the project the public key should be registered in.
+    required: false
+    default: null
+  state:
+    description:
+      - State of the public key.
+    required: false
+    default: 'present'
+    choices: [ 'present', 'absent' ]
+  public_key:
+    description:
+      - String of the public key.
+ required: false + default: null +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +--- +# create a new private / public key pair: +- local_action: cs_sshkeypair name=linus@example.com + register: key +- debug: msg='private key is {{ key.private_key }}' + +# remove a public key by its name: +- local_action: cs_sshkeypair name=linus@example.com state=absent + +# register your existing local public key: +- local_action: cs_sshkeypair name=linus@example.com public_key='{{ lookup('file', '~/.ssh/id_rsa.pub') }}' +''' + +RETURN = ''' +--- +name: + description: Name of the SSH public key. + returned: success + type: string + sample: linus@example.com +fingerprint: + description: Fingerprint of the SSH public key. + returned: success + type: string + sample: "86:5e:a3:e8:bd:95:7b:07:7c:c2:5c:f7:ad:8b:09:28" +private_key: + description: Private key of generated SSH keypair. + returned: changed + type: string + sample: "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQCkeFYjI+4k8bWfIRMzp4pCzhlopNydbbwRu824P5ilD4ATWMUG\nvEtuCQ2Mp5k5Bma30CdYHgh2/SbxC5RxXSUKTUJtTKpoJUy8PAhb1nn9dnfkC2oU\naRVi9NRUgypTIZxMpgooHOxvAzWxbZCyh1W+91Ld3FNaGxTLqTgeevY84wIDAQAB\nAoGAcwQwgLyUwsNB1vmjWwE0QEmvHS4FlhZyahhi4hGfZvbzAxSWHIK7YUT1c8KU\n9XsThEIN8aJ3GvcoL3OAqNKRnoNb14neejVHkYRadhxqc0GVN6AUIyCqoEMpvhFI\nQrinM572ORzv5ffRjCTbvZcYlW+sqFKNo5e8pYIB8TigpFECQQDu7bg9vkvg8xPs\nkP1K+EH0vsR6vUfy+m3euXjnbJtiP7RoTkZk0JQMOmexgy1qQhISWT0e451wd62v\nJ7M0trl5AkEAsDivJnMIlCCCypwPN4tdNUYpe9dtidR1zLmb3SA7wXk5xMUgLZI9\ncWPjBCMt0KKShdDhQ+hjXAyKQLF7iAPuOwJABjdHCMwvmy2XwhrPjCjDRoPEBtFv\n0sFzJE08+QBZVogDwIbwy+SlRWArnHGmN9J6N+H8dhZD3U4vxZPJ1MBAOQJBAJxO\nCv1dt1Q76gbwmYa49LnWO+F+2cgRTVODpr5iYt5fOmBQQRRqzFkRMkFvOqn+KVzM\nQ6LKM6dn8BEl295vLhUCQQCVDWzoSk3GjL3sOjfAUTyAj8VAXM69llaptxWWySPM\nE9pA+8rYmHfohYFx7FD5/KWCO+sfmxTNB48X0uwyE8tO\n-----END RSA PRIVATE KEY-----\n" +''' + + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +try: + import sshpubkeys + has_lib_sshpubkeys = True +except ImportError: + has_lib_sshpubkeys = False + +from ansible.module_utils.cloudstack import * + +class AnsibleCloudStackSshKey(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.ssh_key = None + + + def register_ssh_key(self, public_key): + ssh_key = self.get_ssh_key() + + args = {} + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + + res = None + if not ssh_key: + self.result['changed'] = True + args['publickey'] = public_key + if not self.module.check_mode: + res = self.cs.registerSSHKeyPair(**args) + + else: + fingerprint = self._get_ssh_fingerprint(public_key) + if ssh_key['fingerprint'] != fingerprint: + self.result['changed'] = True + if not self.module.check_mode: + self.cs.deleteSSHKeyPair(**args) + args['publickey'] = public_key + res = self.cs.registerSSHKeyPair(**args) + + if res and 'keypair' in res: + ssh_key = res['keypair'] + + return ssh_key + + + def create_ssh_key(self): + ssh_key = self.get_ssh_key() + if not ssh_key: + self.result['changed'] = True + args = {} + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + if not self.module.check_mode: + res = self.cs.createSSHKeyPair(**args) + ssh_key = res['keypair'] + return ssh_key + + + def remove_ssh_key(self): + ssh_key = self.get_ssh_key() + if ssh_key: + self.result['changed'] = True + args = {} + args['name'] = 
self.module.params.get('name') + args['projectid'] = self.get_project_id() + if not self.module.check_mode: + res = self.cs.deleteSSHKeyPair(**args) + return ssh_key + + + def get_ssh_key(self): + if not self.ssh_key: + args = {} + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + + ssh_keys = self.cs.listSSHKeyPairs(**args) + if ssh_keys and 'sshkeypair' in ssh_keys: + self.ssh_key = ssh_keys['sshkeypair'][0] + return self.ssh_key + + + def get_result(self, ssh_key): + if ssh_key: + if 'fingerprint' in ssh_key: + self.result['fingerprint'] = ssh_key['fingerprint'] + + if 'name' in ssh_key: + self.result['name'] = ssh_key['name'] + + if 'privatekey' in ssh_key: + self.result['private_key'] = ssh_key['privatekey'] + return self.result + + + def _get_ssh_fingerprint(self, public_key): + key = sshpubkeys.SSHKey(public_key) + return key.hash() + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + public_key = dict(default=None), + project = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + if not has_lib_sshpubkeys: + module.fail_json(msg="python library sshpubkeys required: pip install sshpubkeys") + + try: + acs_sshkey = AnsibleCloudStackSshKey(module) + state = module.params.get('state') + if state in ['absent']: + ssh_key = acs_sshkey.remove_ssh_key() + else: + public_key = module.params.get('public_key') + if public_key: + ssh_key = acs_sshkey.register_ssh_key(public_key) + else: + ssh_key = acs_sshkey.create_ssh_key() + + result = acs_sshkey.get_result(ssh_key) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py new file mode 100644 index 00000000000..dad660cd77c --- /dev/null +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -0,0 +1,290 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_vmsnapshot +short_description: Manages VM snapshots on Apache CloudStack based clouds. +description: + - Create, remove and revert VM from snapshots. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Unique Name of the snapshot. In CloudStack terms C(displayname). + required: true + aliases: ['displayname'] + vm: + description: + - Name of the virtual machine. + required: true + description: + description: + - Description of the snapshot. 
+ required: false + default: null + snapshot_memory: + description: + - Snapshot memory if set to true. + required: false + default: false + zone: + description: + - Name of the zone in which the VM is in. If not set, default zone is used. + required: false + default: null + project: + description: + - Name of the project the VM is assigned to. + required: false + default: null + state: + description: + - State of the snapshot. + required: false + default: 'present' + choices: [ 'present', 'absent', 'revert' ] + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +--- +# Create a VM snapshot of disk and memory before an upgrade +- local_action: + module: cs_vmsnapshot + name: Snapshot before upgrade + vm: web-01 + snapshot_memory: yes + + +# Revert a VM to a snapshot after a failed upgrade +- local_action: + module: cs_vmsnapshot + name: Snapshot before upgrade + vm: web-01 + state: revert + + +# Remove a VM snapshot after successful upgrade +- local_action: + module: cs_vmsnapshot + name: Snapshot before upgrade + vm: web-01 + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the snapshot. + returned: success + type: string + sample: snapshot before update +displayname: + description: displayname of the snapshot. + returned: success + type: string + sample: snapshot before update +created: + description: date of the snapshot. + returned: success + type: string + sample: 2015-03-29T14:57:06+0200 +current: + description: true if snapshot is current + returned: success + type: boolean + sample: True +state: + description: state of the vm snapshot + returned: success + type: string + sample: Allocated +type: + description: type of vm snapshot + returned: success + type: string + sample: DiskAndMemory +description: + description: + description: description of vm snapshot + returned: success + type: string + sample: snapshot brought to you by Ansible +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackVmSnapshot(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + + + def get_snapshot(self): + args = {} + args['virtualmachineid'] = self.get_vm_id() + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + + snapshots = self.cs.listVMSnapshot(**args) + if snapshots: + return snapshots['vmSnapshot'][0] + return None + + + def create_snapshot(self): + snapshot = self.get_snapshot() + if not snapshot: + self.result['changed'] = True + + args = {} + args['virtualmachineid'] = self.get_vm_id() + args['name'] = self.module.params.get('name') + args['description'] = self.module.params.get('description') + args['snapshotmemory'] = self.module.params.get('snapshot_memory') + + if not self.module.check_mode: + res = self.cs.createVMSnapshot(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + snapshot = self._poll_job(res, 'vmsnapshot') + + return snapshot + + + def remove_snapshot(self): + snapshot = self.get_snapshot() + if snapshot: + self.result['changed'] = True + if not self.module.check_mode: + res = 
self.cs.deleteVMSnapshot(vmsnapshotid=snapshot['id']) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'vmsnapshot') + return snapshot + + + def revert_vm_to_snapshot(self): + snapshot = self.get_snapshot() + if snapshot: + self.result['changed'] = True + + if snapshot['state'] != "Ready": + self.module.fail_json(msg="snapshot state is '%s', not ready, could not revert VM" % snapshot['state']) + + if not self.module.check_mode: + res = self.cs.revertToVMSnapshot(vmsnapshotid=snapshot['id']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'vmsnapshot') + return snapshot + + self.module.fail_json(msg="snapshot not found, could not revert VM") + + + def get_result(self, snapshot): + if snapshot: + if 'displayname' in snapshot: + self.result['displayname'] = snapshot['displayname'] + if 'created' in snapshot: + self.result['created'] = snapshot['created'] + if 'current' in snapshot: + self.result['current'] = snapshot['current'] + if 'state' in snapshot: + self.result['state'] = snapshot['state'] + if 'type' in snapshot: + self.result['type'] = snapshot['type'] + if 'name' in snapshot: + self.result['name'] = snapshot['name'] + if 'description' in snapshot: + self.result['description'] = snapshot['description'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, aliases=['displayname']), + vm = dict(required=True), + description = dict(default=None), + project = dict(default=None), + zone = dict(default=None), + snapshot_memory = dict(choices=BOOLEANS, default=False), + state = dict(choices=['present', 'absent', 'revert'], default='present'), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module) + + state = module.params.get('state') + if state in ['revert']: + snapshot = acs_vmsnapshot.revert_vm_to_snapshot() + elif state in ['absent']: + snapshot = acs_vmsnapshot.remove_snapshot() + else: + snapshot = acs_vmsnapshot.create_snapshot() + + result = acs_vmsnapshot.get_result(snapshot) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/google/__init__.py b/cloud/google/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/google/gce_img.py b/cloud/google/gce_img.py new file mode 100644 index 00000000000..3b2351b3752 --- /dev/null +++ b/cloud/google/gce_img.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# Copyright 2015 Google Inc. All Rights Reserved. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +"""An Ansible module to utilize GCE image resources.""" + +DOCUMENTATION = ''' +--- +module: gce_img +version_added: "1.9" +short_description: utilize GCE image resources +description: + - This module can create and delete GCE private images from gzipped + compressed tarball containing raw disk data or from existing detached + disks in any zone. U(https://cloud.google.com/compute/docs/images) +options: + name: + description: + - the name of the image to create or delete + required: true + default: null + aliases: [] + description: + description: + - an optional description + required: false + default: null + aliases: [] + source: + description: + - the source disk or the Google Cloud Storage URI to create the image from + required: false + default: null + aliases: [] + state: + description: + - desired state of the image + required: false + default: "present" + choices: ["present", "absent"] + aliases: [] + zone: + description: + - the zone of the disk specified by source + required: false + default: "us-central1-a" + aliases: [] + service_account_email: + description: + - service account email + required: false + default: null + aliases: [] + pem_file: + description: + - path to the pem file associated with the service account email + required: false + default: null + aliases: [] + project_id: + description: + - your GCE project ID + required: false + default: null + aliases: [] + +requirements: [ "libcloud" ] +author: Peter Tan +''' + +EXAMPLES = ''' +# Create an image named test-image from the disk 'test-disk' in zone us-central1-a. +- gce_img: + name: test-image + source: test-disk + zone: us-central1-a + state: present + +# Create an image named test-image from a tarball in Google Cloud Storage. +- gce_img: + name: test-image + source: https://storage.googleapis.com/bucket/path/to/image.tgz + +# Alternatively use the gs scheme +- gce_img: + name: test-image + source: gs://bucket/path/to/image.tgz + +# Delete an image named test-image. +- gce_img: + name: test-image + state: absent +''' + +import sys + +try: + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + from libcloud.common.google import GoogleBaseError + from libcloud.common.google import ResourceExistsError + from libcloud.common.google import ResourceNotFoundError + _ = Provider.GCE + has_libcloud = True +except ImportError: + has_libcloud = False + + +GCS_URI = 'https://storage.googleapis.com/' + + +def create_image(gce, name, module): + """Create an image with the specified name.""" + source = module.params.get('source') + zone = module.params.get('zone') + desc = module.params.get('description') + + if not source: + module.fail_json(msg='Must supply a source', changed=False) + + if source.startswith(GCS_URI): + # source is a Google Cloud Storage URI + volume = source + elif source.startswith('gs://'): + # libcloud only accepts https URI. 
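+        # e.g. gs://bucket/path/to/image.tgz becomes
+        # https://storage.googleapis.com/bucket/path/to/image.tgz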
+ volume = source.replace('gs://', GCS_URI) + else: + try: + volume = gce.ex_get_volume(source, zone) + except ResourceNotFoundError: + module.fail_json(msg='Disk %s not found in zone %s' % (source, zone), + changed=False) + except GoogleBaseError, e: + module.fail_json(msg=str(e), changed=False) + + try: + gce.ex_create_image(name, volume, desc, False) + return True + except ResourceExistsError: + return False + except GoogleBaseError, e: + module.fail_json(msg=str(e), changed=False) + + +def delete_image(gce, name, module): + """Delete a specific image resource by name.""" + try: + gce.ex_delete_image(name) + return True + except ResourceNotFoundError: + return False + except GoogleBaseError, e: + module.fail_json(msg=str(e), changed=False) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + description=dict(), + source=dict(), + state=dict(default='present', choices=['present', 'absent']), + zone=dict(default='us-central1-a'), + service_account_email=dict(), + pem_file=dict(), + project_id=dict(), + ) + ) + + if not has_libcloud: + module.fail_json(msg='libcloud with GCE support is required.') + + gce = gce_connect(module) + + name = module.params.get('name') + state = module.params.get('state') + changed = False + + # user wants to create an image. + if state == 'present': + changed = create_image(gce, name, module) + + # user wants to delete the image. + if state == 'absent': + changed = delete_image(gce, name, module) + + module.exit_json(changed=changed, name=name) + sys.exit(0) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.gce import * + +main() diff --git a/cloud/lxc/__init__.py b/cloud/lxc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py new file mode 100644 index 00000000000..5f0f6bb2ad6 --- /dev/null +++ b/cloud/lxc/lxc_container.py @@ -0,0 +1,1480 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Kevin Carter +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION = """ +--- +module: lxc_container +short_description: Manage LXC Containers +version_added: 1.8.0 +description: + - Management of LXC containers +author: Kevin Carter +options: + name: + description: + - Name of a container. + required: true + backing_store: + choices: + - dir + - lvm + - loop + - btrfs + description: + - Backend storage type for the container. + required: false + default: dir + template: + description: + - Name of the template to use within an LXC create. + required: false + default: ubuntu + template_options: + description: + - Template options when building the container. + required: false + config: + description: + - Path to the LXC configuration file. + required: false + default: /etc/lxc/default.conf + lv_name: + description: + - Name of the logical volume, defaults to the container name. 
+        default: $CONTAINER_NAME
+        required: false
+    vg_name:
+        description:
+          - If backing store is lvm, specify the name of the volume group.
+        default: lxc
+        required: false
+    thinpool:
+        description:
+          - Use LVM thin pool called TP.
+        required: false
+    fs_type:
+        description:
+          - Create fstype TYPE.
+        default: ext4
+        required: false
+    fs_size:
+        description:
+          - File system size.
+        default: 5G
+        required: false
+    directory:
+        description:
+          - Place rootfs directory under DIR.
+        required: false
+    zfs_root:
+        description:
+          - Create zfs under given zfsroot.
+        required: false
+    container_command:
+        description:
+          - Run a command within a container.
+        required: false
+    lxc_path:
+        description:
+          - Place container under PATH.
+        required: false
+    container_log:
+        choices:
+          - true
+          - false
+        description:
+          - Enable a container log for host actions to the container.
+        default: false
+    container_log_level:
+        choices:
+          - INFO
+          - ERROR
+          - DEBUG
+        description:
+          - Set the log level for a container where *container_log* was set.
+        required: false
+        default: INFO
+    archive:
+        choices:
+          - true
+          - false
+        description:
+          - Create an archive of a container. This will create a tarball of the
+            running container.
+        default: false
+    archive_path:
+        description:
+          - Path to save the archived container. If the path does not exist
+            the archive method will attempt to create it.
+        default: /tmp
+    archive_compression:
+        choices:
+          - gzip
+          - bzip2
+          - none
+        description:
+          - Type of compression to use when creating an archive of a running
+            container.
+        default: gzip
+    state:
+        choices:
+          - started
+          - stopped
+          - restarted
+          - absent
+          - frozen
+        description:
+          - Desired state of the container. New containers are started right
+            after they are created.
+        required: false
+        default: started
+    container_config:
+        description:
+          - List of 'key=value' options to use when configuring a container.
+        required: false
+requirements: ['lxc >= 1.0', 'python2-lxc >= 0.1']
+notes:
+    - Containers must have a unique name. If you attempt to create a container
+      with a name that already exists in the user's namespace the module will
+      simply return as "unchanged".
+    - The "container_command" can be used with any state except "absent". If
+      used with state "stopped" the container will be "started", the command
+      executed, and then the container "stopped" again. Likewise if the state
+      is "stopped" and the container does not exist it will be first created,
+      "started", the command executed, and then "stopped". If you use a "|"
+      in the variable you can use common script formatting within the variable
+      itself. The "container_command" option will always execute as BASH.
+      When using "container_command" a log file is created in the /tmp/ directory
+      which contains both stdout and stderr of any command executed.
+    - If "archive" is **true** the system will attempt to create a compressed
+      tarball of the running container. The "archive" option supports LVM backed
+      containers and will create a snapshot of the running container when
+      creating the archive.
+    - If your distro does not have a package for "python2-lxc", which is a
+      requirement for this module, it can be installed from source at
+      "https://github.com/lxc/python2-lxc"
+"""
+
+EXAMPLES = """
+- name: Create a started container
+  lxc_container:
+    name: test-container-started
+    container_log: true
+    template: ubuntu
+    state: started
+    template_options: --release trusty
+
+- name: Create a stopped container
+  lxc_container:
+    name: test-container-stopped
+    container_log: true
+    template: ubuntu
+    state: stopped
+    template_options: --release trusty
+
+- name: Create a frozen container
+  lxc_container:
+    name: test-container-frozen
+    container_log: true
+    template: ubuntu
+    state: frozen
+    template_options: --release trusty
+    container_command: |
+      echo 'hello world.' | tee /opt/started-frozen
+
+# Create a filesystem container, configure it, archive it, and start it.
+- name: Create filesystem container
+  lxc_container:
+    name: test-container-config
+    container_log: true
+    template: ubuntu
+    state: started
+    archive: true
+    archive_compression: none
+    container_config:
+      - "lxc.aa_profile=unconfined"
+      - "lxc.cgroup.devices.allow=a *:* rmw"
+    template_options: --release trusty
+
+# Create an lvm container, run a complex command in it, add additional
+# configuration to it, create an archive of it, and finally leave the container
+# in a frozen state. The container archive will be compressed using bzip2.
+- name: Create an lvm container
+  lxc_container:
+    name: test-container-lvm
+    container_log: true
+    template: ubuntu
+    state: frozen
+    backing_store: lvm
+    template_options: --release trusty
+    container_command: |
+      apt-get update
+      apt-get install -y vim lxc-dev
+      echo 'hello world.' | tee /opt/started
+      if [[ -f "/opt/started" ]]; then
+        echo 'hello world.' | tee /opt/found-started
+      fi
+    container_config:
+      - "lxc.aa_profile=unconfined"
+      - "lxc.cgroup.devices.allow=a *:* rmw"
+    archive: true
+    archive_compression: bzip2
+  register: lvm_container_info
+
+- name: Debug info on container "test-container-lvm"
+  debug: var=lvm_container_info
+
+- name: Get information on a given container.
+  lxc_container:
+    name: test-container-config
+  register: config_container_info
+
+- name: debug info on container "test-container"
+  debug: var=config_container_info
+
+- name: Run a command in a container and ensure it's in a "stopped" state.
+  lxc_container:
+    name: test-container-started
+    state: stopped
+    container_command: |
+      echo 'hello world.' | tee /opt/stopped
+
+- name: Run a command in a container and ensure it's in a "frozen" state.
+  lxc_container:
+    name: test-container-stopped
+    state: frozen
+    container_command: |
+      echo 'hello world.' | tee /opt/frozen
+
+- name: Start a container.
+  lxc_container:
+    name: test-container-stopped
+    state: started
+
+- name: Run a command in a container and then restart it.
+  lxc_container:
+    name: test-container-started
+    state: restarted
+    container_command: |
+      echo 'hello world.' | tee /opt/restarted
+
+- name: Run a complex command within a "running" container.
+  lxc_container:
+    name: test-container-started
+    container_command: |
+      apt-get update
+      apt-get install -y curl wget vim apache2
+      echo 'hello world.' | tee /opt/started
+      if [[ -f "/opt/started" ]]; then
+        echo 'hello world.' | tee /opt/found-started
+      fi
+
+# Create an archive of an existing container, save the archive to a defined
+# path and then destroy it.
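+# (state=absent combined with archive=true archives the container first,
+# then destroys it; the tarball lands in archive_path)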
+- name: Archive container + lxc_container: + name: test-container-started + state: absent + archive: true + archive_path: /opt/archives + +- name: Destroy a container. + lxc_container: + name: "{{ item }}" + state: absent + with_items: + - test-container-stopped + - test-container-started + - test-container-frozen + - test-container-lvm + - test-container-config +""" + + +try: + import lxc +except ImportError: + msg = 'The lxc module is not importable. Check the requirements.' + print("failed=True msg='%s'" % msg) + raise SystemExit(msg) + + +# LXC_COMPRESSION_MAP is a map of available compression types when creating +# an archive of a container. +LXC_COMPRESSION_MAP = { + 'gzip': { + 'extension': 'tar.tgz', + 'argument': '-czf' + }, + 'bzip2': { + 'extension': 'tar.bz2', + 'argument': '-cjf' + }, + 'none': { + 'extension': 'tar', + 'argument': '-cf' + } +} + + +# LXC_COMMAND_MAP is a map of variables that are available to a method based +# on the state the container is in. +LXC_COMMAND_MAP = { + 'create': { + 'variables': { + 'config': '--config', + 'template': '--template', + 'backing_store': '--bdev', + 'lxc_path': '--lxcpath', + 'lv_name': '--lvname', + 'vg_name': '--vgname', + 'thinpool': '--thinpool', + 'fs_type': '--fstype', + 'fs_size': '--fssize', + 'directory': '--dir', + 'zfs_root': '--zfsroot' + } + } +} + + +# LXC_BACKING_STORE is a map of available storage backends and options that +# are incompatible with the given storage backend. +LXC_BACKING_STORE = { + 'dir': [ + 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool' + ], + 'lvm': [ + 'zfs_root' + ], + 'btrfs': [ + 'lv_name', 'vg_name', 'thinpool', 'zfs_root' + ], + 'loop': [ + 'lv_name', 'vg_name', 'thinpool', 'zfs_root' + ] +} + + +# LXC_LOGGING_LEVELS is a map of available log levels +LXC_LOGGING_LEVELS = { + 'INFO': ['info', 'INFO', 'Info'], + 'ERROR': ['error', 'ERROR', 'Error'], + 'DEBUG': ['debug', 'DEBUG', 'Debug'] +} + + +# LXC_ANSIBLE_STATES is a map of states that contain values of methods used +# when a particular state is evoked. +LXC_ANSIBLE_STATES = { + 'started': '_started', + 'stopped': '_stopped', + 'restarted': '_restarted', + 'absent': '_destroyed', + 'frozen': '_frozen' +} + + +# This is used to attach to a running container and execute commands from +# within the container on the host. This will provide local access to a +# container without using SSH. The template will attempt to work within the +# home directory of the user that was attached to the container and source +# that users environment variables by default. +ATTACH_TEMPLATE = """#!/usr/bin/env bash +pushd "$(getent passwd $(whoami)|cut -f6 -d':')" + if [[ -f ".bashrc" ]];then + source .bashrc + fi +popd + +# User defined command +%(container_command)s +""" + + +def create_script(command): + """Write out a script onto a target. + + This method should be backward compatible with Python 2.4+ when executing + from within the container. + + :param command: command to run, this can be a script and can use spacing + with newlines as separation. + :type command: ``str`` + """ + + import os + import os.path as path + import subprocess + import tempfile + + # Ensure that the directory /opt exists. + if not path.isdir('/opt'): + os.mkdir('/opt') + + # Create the script. + script_file = path.join('/opt', '.lxc-attach-script') + f = open(script_file, 'wb') + try: + f.write(ATTACH_TEMPLATE % {'container_command': command}) + f.flush() + finally: + f.close() + + # Ensure the script is executable. 
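+    # (0755 = rwxr-xr-x; the script is invoked directly via Popen below,
+    # so the execute bit is required)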
+    os.chmod(script_file, 0755)
+
+    # Get temporary directory.
+    tempdir = tempfile.gettempdir()
+
+    # Output log file.
+    stdout = path.join(tempdir, 'lxc-attach-script.log')
+    stdout_file = open(stdout, 'ab')
+
+    # Error log file.
+    stderr = path.join(tempdir, 'lxc-attach-script.err')
+    stderr_file = open(stderr, 'ab')
+
+    # Execute the script command.
+    try:
+        subprocess.Popen(
+            [script_file],
+            stdout=stdout_file,
+            stderr=stderr_file
+        ).communicate()
+    finally:
+        # Close the log files.
+        stderr_file.close()
+        stdout_file.close()
+
+        # Remove the script file upon completion of execution.
+        os.remove(script_file)
+
+
+class LxcContainerManagement(object):
+    def __init__(self, module):
+        """Management of LXC containers via Ansible.
+
+        :param module: Processed Ansible Module.
+        :type module: ``object``
+        """
+        self.module = module
+        self.state = self.module.params.get('state', None)
+        self.state_change = False
+        self.lxc_vg = None
+        self.container_name = self.module.params['name']
+        self.container = self.get_container_bind()
+        self.archive_info = None
+
+    def get_container_bind(self):
+        return lxc.Container(name=self.container_name)
+
+    @staticmethod
+    def _roundup(num):
+        """Return a rounded floating point number.
+
+        :param num: Number to round up.
+        :type: ``float``
+        :returns: Rounded up number.
+        :rtype: ``int``
+        """
+        num, part = str(num).split('.')
+        num = int(num)
+        if int(part) != 0:
+            num += 1
+        return num
+
+    @staticmethod
+    def _container_exists(name):
+        """Check if a container exists.
+
+        :param name: Name of the container.
+        :type: ``str``
+        :returns: True or False if the container is found.
+        :rtype: ``bool``
+        """
+        if [i for i in lxc.list_containers() if i == name]:
+            return True
+        else:
+            return False
+
+    @staticmethod
+    def _add_variables(variables_dict, build_command):
+        """Return a command list with all found options.
+
+        :param variables_dict: Pre-parsed optional variables used from a
+                               seed command.
+        :type variables_dict: ``dict``
+        :param build_command: Command to run.
+        :type build_command: ``list``
+        :returns: list of command options.
+        :rtype: ``list``
+        """
+
+        for key, value in variables_dict.items():
+            build_command.append(
+                '%s %s' % (key, value)
+            )
+        else:
+            return build_command
+
+    def _get_vars(self, variables):
+        """Return a dict of all variables as found within the module.
+
+        :param variables: Hash of all variables to find.
+        :type variables: ``dict``
+        """
+
+        # Remove incompatible storage backend options.
+        for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
+            variables.pop(v, None)
+
+        return_dict = dict()
+        for k, v in variables.items():
+            _var = self.module.params.get(k)
+            if not [i for i in [None, ''] + BOOLEANS_FALSE if i == _var]:
+                return_dict[v] = _var
+        else:
+            return return_dict
+
+    def _run_command(self, build_command, unsafe_shell=False, timeout=600):
+        """Return information from running an Ansible Command.
+
+        This will squash the build command list into a string and then
+        execute the command via Ansible. The output is returned to the method.
+        This output is returned as `return_code`, `stdout`, `stderr`.
+
+        Prior to running the command the method will look to see if the LXC
+        lockfile is present. If the lockfile "/var/lock/subsys/lxc" exists,
+        the method will wait up to 10 minutes for it to be gone, polling
+        every second.
+
+        :param build_command: Used for the command and all options.
+        :type build_command: ``list``
+        :param unsafe_shell: Enable or Disable unsafe shell commands.
+        :type unsafe_shell: ``bool``
+        :param timeout: Time before the container create process quits.
+        :type timeout: ``int``
+        """
+
+        lockfile = '/var/lock/subsys/lxc'
+
+        for _ in xrange(timeout):
+            if os.path.exists(lockfile):
+                time.sleep(1)
+            else:
+                return self.module.run_command(
+                    ' '.join(build_command),
+                    use_unsafe_shell=unsafe_shell
+                )
+        else:
+            message = (
+                'The LXC subsystem is locked and after 10 minutes it never'
+                ' became unlocked. Lockfile [ %s ]' % lockfile
+            )
+            self.failure(
+                error='LXC subsystem locked',
+                rc=0,
+                msg=message
+            )
+
+    def _config(self):
+        """Configure an LXC container.
+
+        Write new configuration values to the lxc config file. This will
+        stop the container if it's running, write the new options, and then
+        restart the container upon completion.
+        """
+
+        _container_config = self.module.params.get('container_config')
+        if not _container_config:
+            return False
+
+        container_config_file = self.container.config_file_name
+        with open(container_config_file, 'rb') as f:
+            container_config = f.readlines()
+
+        # Note: ast.literal_eval is used because AnsibleModule does not
+        # provide adequate dictionary parsing.
+        # Issue: https://github.com/ansible/ansible/issues/7679
+        # TODO(cloudnull) adjust import when issue has been resolved.
+        import ast
+        options_dict = ast.literal_eval(_container_config)
+        parsed_options = [i.split('=', 1) for i in options_dict]
+
+        config_change = False
+        for key, value in parsed_options:
+            new_entry = '%s = %s\n' % (key, value)
+            for option_line in container_config:
+                # Look for key in config
+                if option_line.startswith(key):
+                    _, _value = option_line.split('=')
+                    config_value = ' '.join(_value.split())
+                    line_index = container_config.index(option_line)
+                    # If the sanitized values don't match replace them
+                    if value != config_value:
+                        line_index += 1
+                        if new_entry not in container_config:
+                            config_change = True
+                            container_config.insert(line_index, new_entry)
+                    # Break the flow as values are written or not at this point
+                    break
+            else:
+                config_change = True
+                container_config.append(new_entry)
+
+        # If the config changed restart the container.
+        if config_change:
+            container_state = self._get_state()
+            if container_state != 'stopped':
+                self.container.stop()
+
+            with open(container_config_file, 'wb') as f:
+                f.writelines(container_config)
+
+            self.state_change = True
+            if container_state == 'running':
+                self._container_startup()
+            elif container_state == 'frozen':
+                self._container_startup()
+                self.container.freeze()
+
+    def _create(self):
+        """Create a new LXC container.
+
+        This method will build and execute a shell command to build the
+        container. It would have been nice to simply use the lxc python library
+        however at the time this was written the python library, in both py2
+        and py3, didn't support some of the more advanced container create
+        processes. These missing processes mainly revolve around backing
+        LXC containers with block devices.
+        """
+
+        build_command = [
+            self.module.get_bin_path('lxc-create', True),
+            '--name %s' % self.container_name,
+            '--quiet'
+        ]
+
+        build_command = self._add_variables(
+            variables_dict=self._get_vars(
+                variables=LXC_COMMAND_MAP['create']['variables']
+            ),
+            build_command=build_command
+        )
+
+        # Load logging for the instance when creating it.
+        if self.module.params.get('container_log') in BOOLEANS_TRUE:
+            # Set the logging path to /var/log/lxc if uid is root, else
+            # set it to the home folder of the user executing.
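+            # A failed makedirs() on /var/log/lxc/ (e.g. a read-only
+            # filesystem) also falls back to $HOME via the OSError handler.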
+            try:
+                if os.getuid() != 0:
+                    log_path = os.getenv('HOME')
+                else:
+                    if not os.path.isdir('/var/log/lxc/'):
+                        os.makedirs('/var/log/lxc/')
+                    log_path = '/var/log/lxc/'
+            except OSError:
+                log_path = os.getenv('HOME')
+
+            build_command.extend([
+                '--logfile %s' % os.path.join(
+                    log_path, 'lxc-%s.log' % self.container_name
+                ),
+                '--logpriority %s' % self.module.params.get(
+                    'container_log_level'
+                ).upper()
+            ])
+
+        # Add the template commands to the end of the command if there are any
+        template_options = self.module.params.get('template_options', None)
+        if template_options:
+            build_command.append('-- %s' % template_options)
+
+        rc, return_data, err = self._run_command(build_command)
+        if rc != 0:
+            msg = "Failed executing lxc-create."
+            self.failure(
+                err=err, rc=rc, msg=msg, command=' '.join(build_command)
+            )
+        else:
+            self.state_change = True
+
+    def _container_data(self):
+        """Returns a dict of container information.
+
+        :returns: container data
+        :rtype: ``dict``
+        """
+
+        return {
+            'interfaces': self.container.get_interfaces(),
+            'ips': self.container.get_ips(),
+            'state': self._get_state(),
+            'init_pid': int(self.container.init_pid)
+        }
+
+    def _unfreeze(self):
+        """Unfreeze a container.
+
+        :returns: True or False based on if the container was unfrozen.
+        :rtype: ``bool``
+        """
+
+        unfreeze = self.container.unfreeze()
+        if unfreeze:
+            self.state_change = True
+        return unfreeze
+
+    def _get_state(self):
+        """Return the state of a container.
+
+        If the container is not found the state returned is "absent"
+
+        :returns: state of a container as a lower case string.
+        :rtype: ``str``
+        """
+
+        if self._container_exists(name=self.container_name):
+            return str(self.container.state).lower()
+        else:
+            return str('absent')
+
+    def _execute_command(self):
+        """Execute a shell command."""
+
+        container_command = self.module.params.get('container_command')
+        if container_command:
+            container_state = self._get_state()
+            if container_state == 'frozen':
+                self._unfreeze()
+            elif container_state == 'stopped':
+                self._container_startup()
+
+            self.container.attach_wait(create_script, container_command)
+            self.state_change = True
+
+    def _container_startup(self, timeout=60):
+        """Ensure a container is started.
+
+        :param timeout: Time before the start operation is abandoned.
+        :type timeout: ``int``
+        """
+
+        self.container = self.get_container_bind()
+        for _ in xrange(timeout):
+            if self._get_state() != 'running':
+                self.container.start()
+                self.state_change = True
+                # post startup sleep for 1 second.
+                time.sleep(1)
+            else:
+                return True
+        else:
+            self.failure(
+                lxc_container=self._container_data(),
+                error='Failed to start container'
+                      ' [ %s ]' % self.container_name,
+                rc=1,
+                msg='The container [ %s ] failed to start. Check that lxc is'
+                    ' available and that the container is in a functional'
+                    ' state.' % self.container_name
+            )
+
+    def _check_archive(self):
+        """Create a compressed archive of a container.
+
+        This will store archive info in self.archive_info.
+        """
+
+        if self.module.params.get('archive') in BOOLEANS_TRUE:
+            self.archive_info = {
+                'archive': self._container_create_tar()
+            }
+
+    def _destroyed(self, timeout=60):
+        """Ensure a container is destroyed.
+
+        :param timeout: Time before the destroy operation is abandoned.
+        :type timeout: ``int``
+        """
+
+        for _ in xrange(timeout):
+            if not self._container_exists(name=self.container_name):
+                break
+
+            # Check if the container needs to have an archive created.
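+            # (the archive is taken before the container is stopped and
+            # destroyed, so the tarball reflects its last running state)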
+ self._check_archive() + + if self._get_state() != 'stopped': + self.state_change = True + self.container.stop() + + if self.container.destroy(): + self.state_change = True + + # post destroy attempt sleep for 1 second. + time.sleep(1) + else: + self.failure( + lxc_container=self._container_data(), + error='Failed to destroy container' + ' [ %s ]' % self.container_name, + rc=1, + msg='The container [ %s ] failed to be destroyed. Check' + ' that lxc is available and that the container is in a' + ' functional state.' % self.container_name + ) + + def _frozen(self, count=0): + """Ensure a container is frozen. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='frozen') + if self._container_exists(name=self.container_name): + self._execute_command() + + # Perform any configuration updates + self._config() + + container_state = self._get_state() + if container_state == 'frozen': + pass + elif container_state == 'running': + self.container.freeze() + self.state_change = True + else: + self._container_startup() + self.container.freeze() + self.state_change = True + + # Check if the container needs to have an archive created. + self._check_archive() + else: + self._create() + count += 1 + self._frozen(count) + + def _restarted(self, count=0): + """Ensure a container is restarted. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='restart') + if self._container_exists(name=self.container_name): + self._execute_command() + + # Perform any configuration updates + self._config() + + if self._get_state() != 'stopped': + self.container.stop() + self.state_change = True + + # Check if the container needs to have an archive created. + self._check_archive() + else: + self._create() + count += 1 + self._restarted(count) + + def _stopped(self, count=0): + """Ensure a container is stopped. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='stop') + if self._container_exists(name=self.container_name): + self._execute_command() + + # Perform any configuration updates + self._config() + + if self._get_state() != 'stopped': + self.container.stop() + self.state_change = True + + # Check if the container needs to have an archive created. + self._check_archive() + else: + self._create() + count += 1 + self._stopped(count) + + def _started(self, count=0): + """Ensure a container is started. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='start') + if self._container_exists(name=self.container_name): + container_state = self._get_state() + if container_state == 'running': + pass + elif container_state == 'frozen': + self._unfreeze() + elif not self._container_startup(): + self.failure( + lxc_container=self._container_data(), + error='Failed to start container' + ' [ %s ]' % self.container_name, + rc=1, + msg='The container [ %s ] failed to start. Check to lxc is' + ' available and that the container is in a functional' + ' state.' 
% self.container_name + ) + + # Return data + self._execute_command() + + # Perform any configuration updates + self._config() + + # Check if the container needs to have an archive created. + self._check_archive() + else: + self._create() + count += 1 + self._started(count) + + def _get_lxc_vg(self): + """Return the name of the Volume Group used in LXC.""" + + build_command = [ + self.module.get_bin_path('lxc-config', True), + "lxc.bdev.lvm.vg" + ] + rc, vg, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to read LVM VG from LXC config', + command=' '.join(build_command) + ) + else: + return str(vg.strip()) + + def _lvm_lv_list(self): + """Return a list of all lv in a current vg.""" + + vg = self._get_lxc_vg() + build_command = [ + self.module.get_bin_path('lvs', True) + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to get list of LVs', + command=' '.join(build_command) + ) + + all_lvms = [i.split() for i in stdout.splitlines()][1:] + return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg] + + def _get_vg_free_pe(self, name): + """Return the available size of a given VG. + + :param name: Name of volume. + :type name: ``str`` + :returns: size and measurement of an LV + :type: ``tuple`` + """ + + build_command = [ + 'vgdisplay', + name, + '--units', + 'g' + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to read vg %s' % name, + command=' '.join(build_command) + ) + + vg_info = [i.strip() for i in stdout.splitlines()][1:] + free_pe = [i for i in vg_info if i.startswith('Free')] + _free_pe = free_pe[0].split() + return float(_free_pe[-2]), _free_pe[-1] + + def _get_lv_size(self, name): + """Return the available size of a given LV. + + :param name: Name of volume. + :type name: ``str`` + :returns: size and measurement of an LV + :type: ``tuple`` + """ + + vg = self._get_lxc_vg() + lv = os.path.join(vg, name) + build_command = [ + 'lvdisplay', + lv, + '--units', + 'g' + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to read lv %s' % lv, + command=' '.join(build_command) + ) + + lv_info = [i.strip() for i in stdout.splitlines()][1:] + _free_pe = [i for i in lv_info if i.startswith('LV Size')] + free_pe = _free_pe[0].split() + return self._roundup(float(free_pe[-2])), free_pe[-1] + + def _lvm_snapshot_create(self, source_lv, snapshot_name, + snapshot_size_gb=5): + """Create an LVM snapshot. 
+ + :param source_lv: Name of lv to snapshot + :type source_lv: ``str`` + :param snapshot_name: Name of lv snapshot + :type snapshot_name: ``str`` + :param snapshot_size_gb: Size of snapshot to create + :type snapshot_size_gb: ``int`` + """ + + vg = self._get_lxc_vg() + free_space, messurement = self._get_vg_free_pe(name=vg) + + if free_space < float(snapshot_size_gb): + message = ( + 'Snapshot size [ %s ] is > greater than [ %s ] on volume group' + ' [ %s ]' % (snapshot_size_gb, free_space, vg) + ) + self.failure( + error='Not enough space to create snapshot', + rc=2, + msg=message + ) + + # Create LVM Snapshot + build_command = [ + self.module.get_bin_path('lvcreate', True), + "-n", + snapshot_name, + "-s", + os.path.join(vg, source_lv), + "-L%sg" % snapshot_size_gb + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to Create LVM snapshot %s/%s --> %s' + % (vg, source_lv, snapshot_name) + ) + + def _lvm_lv_mount(self, lv_name, mount_point): + """mount an lv. + + :param lv_name: name of the logical volume to mount + :type lv_name: ``str`` + :param mount_point: path on the file system that is mounted. + :type mount_point: ``str`` + """ + + vg = self._get_lxc_vg() + + build_command = [ + self.module.get_bin_path('mount', True), + "/dev/%s/%s" % (vg, lv_name), + mount_point, + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to mountlvm lv %s/%s to %s' + % (vg, lv_name, mount_point) + ) + + def _create_tar(self, source_dir): + """Create an archive of a given ``source_dir`` to ``output_path``. + + :param source_dir: Path to the directory to be archived. + :type source_dir: ``str`` + """ + + archive_path = self.module.params.get('archive_path') + if not os.path.isdir(archive_path): + os.makedirs(archive_path) + + archive_compression = self.module.params.get('archive_compression') + compression_type = LXC_COMPRESSION_MAP[archive_compression] + + # remove trailing / if present. + archive_name = '%s.%s' % ( + os.path.join( + archive_path, + self.container_name + ), + compression_type['extension'] + ) + + build_command = [ + self.module.get_bin_path('tar', True), + '--directory=%s' % os.path.realpath( + os.path.expanduser(source_dir) + ), + compression_type['argument'], + archive_name, + '.' + ] + + rc, stdout, err = self._run_command( + build_command=build_command, + unsafe_shell=True + ) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to create tar archive', + command=' '.join(build_command) + ) + + return archive_name + + def _lvm_lv_remove(self, name): + """Remove an LV. + + :param name: The name of the logical volume + :type name: ``str`` + """ + + vg = self._get_lxc_vg() + build_command = [ + self.module.get_bin_path('lvremove', True), + "-f", + "%s/%s" % (vg, name), + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to remove LVM LV %s/%s' % (vg, name), + command=' '.join(build_command) + ) + + def _rsync_data(self, container_path, temp_dir): + """Sync the container directory to the temp directory. 
+ + :param container_path: path to the container container + :type container_path: ``str`` + :param temp_dir: path to the temporary local working directory + :type temp_dir: ``str`` + """ + + build_command = [ + self.module.get_bin_path('rsync', True), + '-aHAX', + container_path, + temp_dir + ] + rc, stdout, err = self._run_command(build_command, unsafe_shell=True) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to perform archive', + command=' '.join(build_command) + ) + + def _unmount(self, mount_point): + """Unmount a file system. + + :param mount_point: path on the file system that is mounted. + :type mount_point: ``str`` + """ + + build_command = [ + self.module.get_bin_path('umount', True), + mount_point, + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to unmount [ %s ]' % mount_point, + command=' '.join(build_command) + ) + + def _container_create_tar(self): + """Create a tar archive from an LXC container. + + The process is as follows: + * Stop or Freeze the container + * Create temporary dir + * Copy container and config to temporary directory + * If LVM backed: + * Create LVM snapshot of LV backing the container + * Mount the snapshot to tmpdir/rootfs + * Restore the state of the container + * Create tar of tmpdir + * Clean up + """ + + # Create a temp dir + temp_dir = tempfile.mkdtemp() + + # Set the name of the working dir, temp + container_name + work_dir = os.path.join(temp_dir, self.container_name) + + # LXC container rootfs + lxc_rootfs = self.container.get_config_item('lxc.rootfs') + + # Test if the containers rootfs is a block device + block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev')) + mount_point = os.path.join(work_dir, 'rootfs') + + # Set the snapshot name if needed + snapshot_name = '%s_lxc_snapshot' % self.container_name + + # Set the path to the container data + container_path = os.path.dirname(lxc_rootfs) + container_state = self._get_state() + try: + # Ensure the original container is stopped or frozen + if container_state not in ['stopped', 'frozen']: + if container_state == 'running': + self.container.freeze() + else: + self.container.stop() + + # Sync the container data from the container_path to work_dir + self._rsync_data(container_path, temp_dir) + + if block_backed: + if snapshot_name not in self._lvm_lv_list(): + if not os.path.exists(mount_point): + os.makedirs(mount_point) + + # Take snapshot + size, measurement = self._get_lv_size( + name=self.container_name + ) + self._lvm_snapshot_create( + source_lv=self.container_name, + snapshot_name=snapshot_name, + snapshot_size_gb=size + ) + + # Mount snapshot + self._lvm_lv_mount( + lv_name=snapshot_name, + mount_point=mount_point + ) + else: + self.failure( + err='snapshot [ %s ] already exists' % snapshot_name, + rc=1, + msg='The snapshot [ %s ] already exists. Please clean' + ' up old snapshot of containers before continuing.' 
+ % snapshot_name + ) + + # Restore original state of container + if container_state == 'running': + if self._get_state() == 'frozen': + self.container.unfreeze() + else: + self.container.start() + + # Set the state as changed and set a new fact + self.state_change = True + return self._create_tar(source_dir=work_dir) + finally: + if block_backed: + # unmount snapshot + self._unmount(mount_point) + + # Remove snapshot + self._lvm_lv_remove(snapshot_name) + + # Remove tmpdir + shutil.rmtree(temp_dir) + + def check_count(self, count, method): + if count > 1: + self.failure( + error='Failed to %s container' % method, + rc=1, + msg='The container [ %s ] failed to %s. Check to lxc is' + ' available and that the container is in a functional' + ' state.' % (self.container_name, method) + ) + + def failure(self, **kwargs): + """Return a Failure when running an Ansible command. + + :param error: ``str`` Error that occurred. + :param rc: ``int`` Return code while executing an Ansible command. + :param msg: ``str`` Message to report. + """ + + self.module.fail_json(**kwargs) + + def run(self): + """Run the main method.""" + + action = getattr(self, LXC_ANSIBLE_STATES[self.state]) + action() + + outcome = self._container_data() + if self.archive_info: + outcome.update(self.archive_info) + + self.module.exit_json( + changed=self.state_change, + lxc_container=outcome + ) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True + ), + template=dict( + type='str', + default='ubuntu' + ), + backing_store=dict( + type='str', + choices=LXC_BACKING_STORE.keys(), + default='dir' + ), + template_options=dict( + type='str' + ), + config=dict( + type='str', + default='/etc/lxc/default.conf' + ), + vg_name=dict( + type='str', + default='lxc' + ), + thinpool=dict( + type='str' + ), + fs_type=dict( + type='str', + default='ext4' + ), + fs_size=dict( + type='str', + default='5G' + ), + directory=dict( + type='str' + ), + zfs_root=dict( + type='str' + ), + lv_name=dict( + type='str' + ), + lxc_path=dict( + type='str' + ), + state=dict( + choices=LXC_ANSIBLE_STATES.keys(), + default='started' + ), + container_command=dict( + type='str' + ), + container_config=dict( + type='str' + ), + container_log=dict( + choices=BOOLEANS, + default='false' + ), + container_log_level=dict( + choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i], + default='INFO' + ), + archive=dict( + choices=BOOLEANS, + default='false' + ), + archive_path=dict( + type='str', + default='/tmp' + ), + archive_compression=dict( + choices=LXC_COMPRESSION_MAP.keys(), + default='gzip' + ) + ), + supports_check_mode=False, + ) + + lv_name = module.params.get('lv_name') + if not lv_name: + module.params['lv_name'] = module.params.get('name') + + lxc_manage = LxcContainerManagement(module=module) + lxc_manage.run() + + +# import module bits +from ansible.module_utils.basic import * +main() + diff --git a/cloud/vmware/__init__.py b/cloud/vmware/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/vmware/vmware_datacenter.py b/cloud/vmware/vmware_datacenter.py new file mode 100644 index 00000000000..35cf7fa4692 --- /dev/null +++ b/cloud/vmware/vmware_datacenter.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free 
Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: vmware_datacenter +short_description: Manage VMware vSphere Datacenters +description: + - Manage VMware vSphere Datacenters +version_added: 2.0 +author: Joseph Callen +notes: + - Tested on vSphere 5.5 +requirements: + - PyVmomi +options: + hostname: + description: + - The hostname or IP address of the vSphere vCenter API server + required: True + username: + description: + - The username of the vSphere vCenter + required: True + aliases: ['user', 'admin'] + password: + description: + - The password of the vSphere vCenter + required: True + aliases: ['pass', 'pwd'] + datacenter_name: + description: + - The name of the datacenter the cluster will be created in. + required: True + state: + description: + - If the datacenter should be present or absent + choices: ['present', 'absent'] + required: True +''' + +EXAMPLES = ''' +# Example vmware_datacenter command from Ansible Playbooks +- name: Create Datacenter + local_action: > + vmware_datacenter + hostname="{{ ansible_ssh_host }}" username=root password=vmware + datacenter_name="datacenter" +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def state_create_datacenter(module): + datacenter_name = module.params['datacenter_name'] + content = module.params['content'] + changed = True + datacenter = None + + folder = content.rootFolder + + try: + if not module.check_mode: + datacenter = folder.CreateDatacenter(name=datacenter_name) + module.exit_json(changed=changed, result=str(datacenter)) + except vim.fault.DuplicateName: + module.fail_json(msg="A datacenter with the name %s already exists" % datacenter_name) + except vim.fault.InvalidName: + module.fail_json(msg="%s is an invalid name for a cluster" % datacenter_name) + except vmodl.fault.NotSupported: + # This should never happen + module.fail_json(msg="Trying to create a datacenter on an incorrect folder object") + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + + +def check_datacenter_state(module): + datacenter_name = module.params['datacenter_name'] + + try: + content = connect_to_api(module) + datacenter = find_datacenter_by_name(content, datacenter_name) + module.params['content'] = content + + if datacenter is None: + return 'absent' + else: + module.params['datacenter'] = datacenter + return 'present' + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + + +def state_destroy_datacenter(module): + datacenter = module.params['datacenter'] + changed = True + result = None + + try: + if not module.check_mode: + task = datacenter.Destroy_Task() + changed, result = wait_for_task(task) + module.exit_json(changed=changed, result=result) + except vim.fault.VimFault as vim_fault: + module.fail_json(msg=vim_fault.msg) + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except 
vmodl.MethodFault as method_fault:
+        module.fail_json(msg=method_fault.msg)
+
+
+def state_exit_unchanged(module):
+    module.exit_json(changed=False)
+
+
+def main():
+
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        dict(
+            datacenter_name=dict(required=True, type='str'),
+            state=dict(required=True, choices=['present', 'absent'], type='str'),
+        )
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    if not HAS_PYVMOMI:
+        module.fail_json(msg='pyvmomi is required for this module')
+
+    datacenter_states = {
+        'absent': {
+            'present': state_destroy_datacenter,
+            'absent': state_exit_unchanged,
+        },
+        'present': {
+            'present': state_exit_unchanged,
+            'absent': state_create_datacenter,
+        }
+    }
+    desired_state = module.params['state']
+    current_state = check_datacenter_state(module)
+
+    datacenter_states[desired_state][current_state](module)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.vmware import *
+
+if __name__ == '__main__':
+    main()
diff --git a/clustering/consul b/clustering/consul
new file mode 100644
index 00000000000..5db79e20c40
--- /dev/null
+++ b/clustering/consul
@@ -0,0 +1,506 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: consul
+short_description: "Add, modify & delete services within a consul cluster.
+  See http://consul.io for more details."
+description:
+  - registers services and checks for an agent with a consul cluster. A service
+    is some process running on the agent node that should be advertised by
+    consul's discovery mechanism. It may optionally supply a check definition,
+    a periodic service test to notify the consul cluster of the service's health.
+    Checks may also be registered per node, e.g. disk usage or cpu usage, and
+    notify the health of the entire node to the cluster.
+    Service level checks do not require a check name or id as these are derived
+    by Consul from the Service name and id respectively by appending 'service:'.
+    Node level checks require a check_name and optionally a check_id.
+    Currently, there is no complete way to retrieve the script, interval or ttl
+    metadata for a registered check. Without this metadata it is not possible to
+    tell if the data supplied with ansible represents a change to a check. As a
+    result this does not attempt to determine changes and will always report that
+    a change occurred. An api method is planned to supply this metadata, so at
+    that stage change management will be added.
+requirements:
+  - python-consul
+  - requests
+version_added: "1.9"
+author: Steve Gargan (steve.gargan@gmail.com)
+options:
+  state:
+    description:
+      - register or deregister the consul service, defaults to present
+    required: true
+    choices: ['present', 'absent']
+  service_name:
+    description:
+      - Unique name for the service on a node, must be unique per node,
+        required if registering a service.
May be omitted if registering
+        a node level check
+    required: false
+  service_id:
+    description:
+      - the ID for the service, must be unique per node, defaults to the
+        service name if the service name is supplied
+    required: false
+    default: service_name if supplied
+  host:
+    description:
+      - host of the consul agent, defaults to localhost
+    required: false
+    default: localhost
+  port:
+    description:
+      - the port on which the consul agent is running
+    required: false
+    default: 8500
+  notes:
+    description:
+      - Notes to attach to check when registering it.
+    required: false
+    default: None
+  service_port:
+    description:
+      - the port on which the service is listening. Required for
+        registration of a service, i.e. if service_name or service_id is set
+    required: false
+  tags:
+    description:
+      - a list of tags that will be attached to the service registration.
+    required: false
+    default: None
+  script:
+    description:
+      - the script/command that will be run periodically to check the health
+        of the service. Scripts require an interval and vice versa
+    required: false
+    default: None
+  interval:
+    description:
+      - the interval at which the service check will be run. This is a number
+        with a s or m suffix to signify the units of seconds or minutes e.g.
+        15s or 1m. If no suffix is supplied, m will be used by default e.g.
+        1 will be 1m. Required if the script param is specified.
+    required: false
+    default: None
+  check_id:
+    description:
+      - an ID for the service check, defaults to the check name, ignored if
+        part of a service definition.
+    required: false
+    default: None
+  check_name:
+    description:
+      - a name for the service check, defaults to the check id. Required if
+        standalone, ignored if part of a service definition.
+    required: false
+    default: None
+  ttl:
+    description:
+      - checks can be registered with a ttl instead of a script and interval;
+        this means that the service will check in with the agent before the
+        ttl expires. If it doesn't, the check will be considered failed.
+        Required if registering a check and the script and interval are missing.
+        Similar to the interval, this is a number with a s or m suffix to
+        signify the units of seconds or minutes e.g. 15s or 1m. If no suffix
+        is supplied, m will be used by default e.g. 1 will be 1m
+    required: false
+    default: None
+  token:
+    description:
+      - the token key identifying an ACL rule set. May be required to
+        register services.
+    required: false
+    default: None
+"""
+
+EXAMPLES = '''
+  - name: register nginx service with the local consul agent
+    consul:
+      name: nginx
+      service_port: 80
+
+  - name: register nginx service with curl check
+    consul:
+      name: nginx
+      service_port: 80
+      script: "curl http://localhost"
+      interval: 60s
+
+  - name: register nginx with some service tags
+    consul:
+      name: nginx
+      service_port: 80
+      tags:
+        - prod
+        - webservers
+
+  - name: remove nginx service
+    consul:
+      name: nginx
+      state: absent
+
+  - name: create a node level check to test disk usage
+    consul:
+      check_name: Disk usage
+      check_id: disk_usage
+      script: "/opt/disk_usage.py"
+      interval: 5m
+
+'''
+
+import sys
+import urllib2
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+try:
+    import consul
+    from requests.exceptions import ConnectionError
+    python_consul_installed = True
+except ImportError, e:
+    python_consul_installed = False
+
+
+def register_with_consul(module):
+
+    state = module.params.get('state')
+
+    if state == 'present':
+        add(module)
+    else:
+        remove(module)
+
+
+def add(module):
+    ''' adds a service or a check depending on supplied configuration '''
+    check = parse_check(module)
+    service = parse_service(module)
+
+    if not service and not check:
+        module.fail_json(msg='a name and port are required to register a service')
+
+    if service:
+        if check:
+            service.add_check(check)
+        add_service(module, service)
+    elif check:
+        add_check(module, check)
+
+
+def remove(module):
+    ''' removes a service or a check '''
+    service_id = module.params.get('service_id') or module.params.get('service_name')
+    check_id = module.params.get('check_id') or module.params.get('check_name')
+    if not (service_id or check_id):
+        module.fail_json(msg='services and checks are removed by id or name;'
+                             ' please supply a service id/name or a check id/name')
+    if service_id:
+        remove_service(module, service_id)
+    else:
+        remove_check(module, check_id)
+
+
+def add_check(module, check):
+    ''' registers a check with the given agent. Currently there is no way to
+    retrieve the full metadata of an existing check through the consul api.
+    Without this we can't compare to the supplied check and so we must assume
+    a change.
+    '''
+    if not check.name:
+        module.fail_json(msg='a check name is required for a node level check,'
+                             ' one not attached to a service')
+
+    consul_api = get_consul_api(module)
+    check.register(consul_api)
+
+    module.exit_json(changed=True,
+                     check_id=check.check_id,
+                     check_name=check.name,
+                     script=check.script,
+                     interval=check.interval,
+                     ttl=check.ttl)
+
+
+def remove_check(module, check_id):
+    ''' removes a check using its id '''
+    consul_api = get_consul_api(module)
+
+    if check_id in consul_api.agent.checks():
+        consul_api.agent.check.deregister(check_id)
+        module.exit_json(changed=True, id=check_id)
+
+    module.exit_json(changed=False, id=check_id)
+
+
+def add_service(module, service):
+    ''' registers a service with the current agent '''
+    result = service
+    changed = False
+
+    consul_api = get_consul_api(module)
+    existing = get_service_by_id(consul_api, service.id)
+
+    # there is no way to retrieve the details of checks, so if a check is
+    # present in the service it must be reregistered
+    if service.has_checks() or not existing or not existing == service:
+
+        service.register(consul_api)
+        # check that it registered correctly
+        registered = get_service_by_id(consul_api, service.id)
+        if registered:
+            result = registered
+            changed = True
+
+    module.exit_json(changed=changed,
+                     service_id=result.id,
+                     service_name=result.name,
+                     service_port=result.port,
+                     checks=map(lambda x: x.to_dict(), service.checks),
+                     tags=result.tags)
+
+
+def remove_service(module, service_id):
+    ''' deregister a service from the given agent using its service id '''
+    consul_api = get_consul_api(module)
+    service = get_service_by_id(consul_api, service_id)
+    if service:
+        consul_api.agent.service.deregister(service_id)
+        module.exit_json(changed=True, id=service_id)
+
+    module.exit_json(changed=False, id=service_id)
+
+
+def get_consul_api(module, token=None):
+    return consul.Consul(host=module.params.get('host'),
+                         port=module.params.get('port'),
+                         token=module.params.get('token'))
+
+
+def get_service_by_id(consul_api, service_id):
+    ''' iterate the registered services and find one with the given id '''
+    for name, service in consul_api.agent.services().iteritems():
+        if service['ID'] == service_id:
+            return ConsulService(loaded=service)
+
+
+def parse_check(module):
+
+    if module.params.get('script') and module.params.get('ttl'):
+        module.fail_json(
+            msg='checks are either script or ttl driven; supplying both does'
+                ' not make sense')
+
+    if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl'):
+
+        return ConsulCheck(
+            module.params.get('check_id'),
+            module.params.get('check_name'),
+            module.params.get('check_node'),
+            module.params.get('check_host'),
+            module.params.get('script'),
+            module.params.get('interval'),
+            module.params.get('ttl'),
+            module.params.get('notes')
+        )
+
+
+def parse_service(module):
+
+    if module.params.get('service_name') and module.params.get('service_port'):
+        return ConsulService(
+            module.params.get('service_id'),
+            module.params.get('service_name'),
+            module.params.get('service_port'),
+            module.params.get('tags'),
+        )
+    elif module.params.get('service_name') and not module.params.get('service_port'):
+
+        module.fail_json(
+            msg="service_name was supplied but no service_port; a port is"
+                " required to configure a service. Did you configure the"
+                " 'port' argument meaning 'service_port'?")
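# --- Editor's aside (illustrative, not part of the patch) ---
# For context, this is roughly the raw python-consul call sequence the
# module wraps; the host, port, and service values here are made up. A
# quick way to sanity-check a locally running agent:
import consul

c = consul.Consul(host='localhost', port=8500)
# Register a service named nginx on port 80 with a couple of tags ...
c.agent.service.register('nginx', service_id='nginx', port=80,
                         tags=['prod', 'webservers'])
# ... then confirm it shows up in the agent's service catalog.
print(c.agent.services())
# --- end aside ---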
+class ConsulService():
+
+    def __init__(self, service_id=None, name=None, port=-1,
+                 tags=None, loaded=None):
+        self.id = self.name = name
+        if service_id:
+            self.id = service_id
+        self.port = port
+        self.tags = tags
+        self.checks = []
+        if loaded:
+            self.id = loaded['ID']
+            self.name = loaded['Service']
+            self.port = loaded['Port']
+            self.tags = loaded['Tags']
+
+    def register(self, consul_api):
+        if len(self.checks) > 0:
+            check = self.checks[0]
+            consul_api.agent.service.register(
+                self.name,
+                service_id=self.id,
+                port=self.port,
+                tags=self.tags,
+                script=check.script,
+                interval=check.interval,
+                ttl=check.ttl)
+        else:
+            consul_api.agent.service.register(
+                self.name,
+                service_id=self.id,
+                port=self.port,
+                tags=self.tags)
+
+    def add_check(self, check):
+        self.checks.append(check)
+
+    def checks(self):
+        return self.checks
+
+    def has_checks(self):
+        return len(self.checks) > 0
+
+    def __eq__(self, other):
+        return (isinstance(other, self.__class__)
+                and self.id == other.id
+                and self.name == other.name
+                and self.port == other.port
+                and self.tags == other.tags)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def to_dict(self):
+        data = {'id': self.id, "name": self.name}
+        if self.port:
+            data['port'] = self.port
+        if self.tags and len(self.tags) > 0:
+            data['tags'] = self.tags
+        if len(self.checks) > 0:
+            data['check'] = self.checks[0].to_dict()
+        return data
+
+
+class ConsulCheck():
+
+    def __init__(self, check_id, name, node=None, host='localhost',
+                 script=None, interval=None, ttl=None, notes=None):
+        self.check_id = self.name = name
+        if check_id:
+            self.check_id = check_id
+        self.script = script
+        self.interval = self.validate_duration('interval', interval)
+        self.ttl = self.validate_duration('ttl', ttl)
+        self.notes = notes
+        self.node = node
+        self.host = host
+
+    def validate_duration(self, name, duration):
+        if duration:
+            duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
+            if not any((duration.endswith(suffix) for suffix in duration_units)):
+                raise Exception('Invalid %s %s you must specify units (%s)' %
+                                (name, duration, ', '.join(duration_units)))
+        return duration
+
+    def register(self, consul_api):
+        consul_api.agent.check.register(self.name, check_id=self.check_id,
+                                        script=self.script,
+                                        interval=self.interval,
+                                        ttl=self.ttl, notes=self.notes)
+
+    def __eq__(self, other):
+        return (isinstance(other, self.__class__)
+                and self.check_id == other.check_id
+                and self.name == other.name
+                and self.script == other.script
+                and self.interval == other.interval)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def to_dict(self):
+        data = {}
+        self._add(data, 'id', attr='check_id')
+        self._add(data, 'name', attr='check_name')
+        self._add(data, 'script')
+        self._add(data, 'node')
+        self._add(data, 'notes')
+        self._add(data, 'host')
+        self._add(data, 'interval')
+        self._add(data, 'ttl')
+        return data
+
+    def _add(self, data, key, attr=None):
+        try:
+            if attr is None:
+                attr = key
+            data[key] = getattr(self, attr)
+        except AttributeError:
+            pass
+
+
+def test_dependencies(module):
+    if not python_consul_installed:
+        module.fail_json(msg="python-consul required for this module. 
"\ + "see http://python-consul.readthedocs.org/en/latest/#installation") + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(default='localhost'), + port=dict(default=8500, type='int'), + check_id=dict(required=False), + check_name=dict(required=False), + check_node=dict(required=False), + check_host=dict(required=False), + notes=dict(required=False), + script=dict(required=False), + service_id=dict(required=False), + service_name=dict(required=False), + service_port=dict(required=False, type='int'), + state=dict(default='present', choices=['present', 'absent']), + interval=dict(required=False, type='str'), + ttl=dict(required=False, type='str'), + tags=dict(required=False, type='list'), + token=dict(required=False) + ), + supports_check_mode=False, + ) + + test_dependencies(module) + + try: + register_with_consul(module) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/clustering/consul_acl b/clustering/consul_acl new file mode 100644 index 00000000000..c481b780a64 --- /dev/null +++ b/clustering/consul_acl @@ -0,0 +1,320 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: consul_acl +short_description: "manipulate consul acl keys and rules" +description: + - allows the addition, modification and deletion of ACL keys and associated + rules in a consul cluster via the agent. For more details on using and + configuring ACLs, see https://www.consul.io/docs/internals/acl.html. +requirements: + - python-consul + - pyhcl + - requests +version_added: "1.9" +author: Steve Gargan (steve.gargan@gmail.com) +options: + mgmt_token: + description: + - a management token is required to manipulate the acl lists + state: + description: + - whether the ACL pair should be present or absent, defaults to present + required: false + choices: ['present', 'absent'] + type: + description: + - the type of token that should be created, either management or + client, defaults to client + choices: ['client', 'management'] + name: + description: + - the name that should be associated with the acl key, this is opaque + to Consul + required: false + token: + description: + - the token key indentifying an ACL rule set. If generated by consul + this will be a UUID. + required: false + rules: + description: + - an list of the rules that should be associated with a given key/token. 
+    required: false
+  host:
+    description:
+      - host of the consul agent, defaults to localhost
+    required: false
+    default: localhost
+  port:
+    description:
+      - the port on which the consul agent is running
+    required: false
+    default: 8500
+"""
+
+EXAMPLES = '''
+  - name: create an acl token with rules
+    consul_acl:
+      mgmt_token: 'some_management_acl'
+      host: 'consul1.mycluster.io'
+      name: 'Foo access'
+      rules:
+        - key: 'foo'
+          policy: read
+        - key: 'private/foo'
+          policy: deny
+
+  - name: remove a token
+    consul_acl:
+      mgmt_token: 'some_management_acl'
+      host: 'consul1.mycluster.io'
+      token: '172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e'
+      state: absent
+'''
+
+import sys
+import urllib2
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+try:
+    import consul
+    from requests.exceptions import ConnectionError
+    python_consul_installed = True
+except ImportError, e:
+    python_consul_installed = False
+
+try:
+    import hcl
+    pyhcl_installed = True
+except ImportError:
+    pyhcl_installed = False
+
+from requests.exceptions import ConnectionError
+
+
+def execute(module):
+
+    state = module.params.get('state')
+
+    if state == 'present':
+        update_acl(module)
+    else:
+        remove_acl(module)
+
+
+def update_acl(module):
+
+    rules = module.params.get('rules')
+    state = module.params.get('state')
+    token = module.params.get('token')
+    token_type = module.params.get('token_type')
+    mgmt = module.params.get('mgmt_token')
+    name = module.params.get('name')
+    consul = get_consul_api(module, mgmt)
+    changed = False
+
+    try:
+
+        if token:
+            existing_rules = load_rules_for_token(module, consul, token)
+            supplied_rules = yml_to_rules(module, rules)
+            changed = not existing_rules == supplied_rules
+            if changed:
+                token = consul.acl.update(
+                    token,
+                    name=name,
+                    type=token_type,
+                    rules=supplied_rules.to_hcl())
+        else:
+            try:
+                rules = yml_to_rules(module, rules)
+                if rules.are_rules():
+                    rules = rules.to_json()
+                else:
+                    rules = None
+
+                token = consul.acl.create(
+                    name=name, type=token_type, rules=rules)
+                changed = True
+            except Exception, e:
+                module.fail_json(
+                    msg="No token returned; check your management key and that"
+                        " the host is in the acl datacenter %s" % e)
+    except Exception, e:
+        module.fail_json(msg="Could not create/update acl %s" % e)
+
+    module.exit_json(changed=changed,
+                     token=token,
+                     rules=rules,
+                     name=name,
+                     type=token_type)
+
+
+def remove_acl(module):
+    state = module.params.get('state')
+    token = module.params.get('token')
+    mgmt = module.params.get('mgmt_token')
+
+    consul = get_consul_api(module, token=mgmt)
+    changed = token and consul.acl.info(token)
+    if changed:
+        token = consul.acl.destroy(token)
+
+    module.exit_json(changed=changed, token=token)
+
+
+def load_rules_for_token(module, consul_api, token):
+    try:
+        rules = Rules()
+        info = consul_api.acl.info(token)
+        if info and info['Rules']:
+            rule_set = to_ascii(info['Rules'])
+            for rule in hcl.loads(rule_set).values():
+                for key, policy in rule.iteritems():
+                    rules.add_rule(Rule(key, policy['policy']))
+        return rules
+    except Exception, e:
+        module.fail_json(
+            msg="Could not load rule list from retrieved rule data %s, %s" % (
+                token, e))
+
+
+def to_ascii(unicode_string):
+    if isinstance(unicode_string, unicode):
+        return unicode_string.encode('ascii', 'ignore')
+    return unicode_string
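# --- Editor's aside (illustrative, not part of the patch) ---
# The module renders each rule through a small HCL template (see the
# `template` string below). Standalone, the transformation from the
# playbook's rule list to HCL text looks roughly like this; the rules
# themselves are made-up sample data.
yml_rules = [{'key': 'foo', 'policy': 'read'},
             {'key': 'private/foo', 'policy': 'deny'}]
hcl_text = '\n'.join('key "%s" {\n    policy = "%s"\n}' % (r['key'], r['policy'])
                     for r in yml_rules)
print(hcl_text)
# --- end aside ---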
+def yml_to_rules(module, yml_rules):
+    rules = Rules()
+    if yml_rules:
+        for rule in yml_rules:
+            if not ('key' in rule and 'policy' in rule):
+                module.fail_json(msg="a rule requires a key and a policy.")
+            rules.add_rule(Rule(rule['key'], rule['policy']))
+    return rules
+
+
+template = '''key "%s" {
+    policy = "%s"
+}'''
+
+
+class Rules:
+
+    def __init__(self):
+        self.rules = {}
+
+    def add_rule(self, rule):
+        self.rules[rule.key] = rule
+
+    def are_rules(self):
+        return len(self.rules) > 0
+
+    def to_json(self):
+        rules = {}
+        for key, rule in self.rules.iteritems():
+            rules[key] = {'policy': rule.policy}
+        return json.dumps({'keys': rules})
+
+    def to_hcl(self):
+
+        rules = ""
+        for key, rule in self.rules.iteritems():
+            rules += template % (key, rule.policy)
+
+        return to_ascii(rules)
+
+    def __eq__(self, other):
+        if not (other and isinstance(other, self.__class__)
+                and len(other.rules) == len(self.rules)):
+            return False
+
+        for name, other_rule in other.rules.iteritems():
+            if name not in self.rules:
+                return False
+            rule = self.rules[name]
+
+            if not (rule and rule == other_rule):
+                return False
+        return True
+
+    def __str__(self):
+        return self.to_hcl()
+
+
+class Rule:
+
+    def __init__(self, key, policy):
+        self.key = key
+        self.policy = policy
+
+    def __eq__(self, other):
+        return (isinstance(other, self.__class__)
+                and self.key == other.key
+                and self.policy == other.policy)
+
+    def __hash__(self):
+        return hash(self.key) ^ hash(self.policy)
+
+    def __str__(self):
+        return '%s %s' % (self.key, self.policy)
+
+
+def get_consul_api(module, token=None):
+    if not token:
+        token = module.params.get('token')
+    return consul.Consul(host=module.params.get('host'),
+                         port=module.params.get('port'),
+                         token=token)
+
+
+def test_dependencies(module):
+    if not python_consul_installed:
+        module.fail_json(msg="python-consul required for this module. "
+                             "see http://python-consul.readthedocs.org/en/latest/#installation")
+
+    if not pyhcl_installed:
+        module.fail_json(msg="pyhcl required for this module."
+                             " see https://pypi.python.org/pypi/pyhcl")
+
+
+def main():
+    argument_spec = dict(
+        mgmt_token=dict(required=True),
+        host=dict(default='localhost'),
+        name=dict(required=False),
+        port=dict(default=8500, type='int'),
+        rules=dict(default=None, required=False, type='list'),
+        state=dict(default='present', choices=['present', 'absent']),
+        token=dict(required=False),
+        token_type=dict(
+            required=False, choices=['client', 'management'], default='client')
+    )
+    module = AnsibleModule(argument_spec, supports_check_mode=False)
+
+    test_dependencies(module)
+
+    try:
+        execute(module)
+    except ConnectionError, e:
+        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+            module.params.get('host'), module.params.get('port'), str(e)))
+    except Exception, e:
+        module.fail_json(msg=str(e))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
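# --- Editor's aside (illustrative, not part of the patch) ---
# Going the other way, load_rules_for_token() leans on pyhcl to parse the
# rule text Consul returns. A minimal round-trip, assuming pyhcl is
# installed and nests block labels as shown:
import hcl

parsed = hcl.loads('key "foo" {\n    policy = "read"\n}')
# The policy is then reached through the nested label structure:
print(parsed['key']['foo']['policy'])  # -> read
# --- end aside ---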
diff --git a/clustering/consul_kv b/clustering/consul_kv
new file mode 100644
index 00000000000..e5a010a8c18
--- /dev/null
+++ b/clustering/consul_kv
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: consul_kv
+short_description: "manipulate entries in the key/value store of a consul
+  cluster. See http://www.consul.io/docs/agent/http.html#kv for more details."
+description:
+  - allows the addition, modification and deletion of key/value entries in a
+    consul cluster via the agent. The entire contents of the record, including
+    the indices, flags and session, are returned as 'value'. If the key
+    represents a prefix, all entries under that prefix can be returned. Note
+    that when a value is removed, the existing value, if any, is returned as
+    part of the results.
+requirements:
+  - python-consul
+  - requests
+version_added: "1.9"
+author: Steve Gargan (steve.gargan@gmail.com)
+options:
+  state:
+    description:
+      - the action to take with the supplied key and value. If the state is
+        'present', the key contents will be set to the value supplied and
+        'changed' will be set to true only if the value was different from the
+        current contents. The state 'absent' will remove the key/value pair;
+        again 'changed' will be set to true only if the key actually existed
+        prior to the removal. An attempt can be made to obtain or free the
+        lock associated with a key/value pair with the states 'acquire' or
+        'release' respectively. A valid session must be supplied to make the
+        attempt; changed will be true if the attempt is successful, false
+        otherwise.
+    required: false
+    choices: ['present', 'absent', 'acquire', 'release']
+    default: present
+  key:
+    description:
+      - the key at which the value should be stored.
+    required: true
+  value:
+    description:
+      - the value to be associated with the given key; required if state
+        is present
+    required: true
+  recurse:
+    description:
+      - if the key represents a prefix, each entry with the prefix can be
+        retrieved by setting this to true.
+    required: false
+    default: false
+  session:
+    description:
+      - the session that should be used to acquire or release a lock
+        associated with a key/value pair
+    required: false
+    default: None
+  token:
+    description:
+      - the token key identifying an ACL rule set that controls access to
+        the key value pair
+    required: false
+    default: None
+  cas:
+    description:
+      - used when acquiring a lock with a session. If the cas is 0, then
+        Consul will only put the key if it does not already exist. If the
+        cas value is non-zero, then the key is only set if the index matches
+        the ModifyIndex of that key.
+    required: false
+    default: None
+  flags:
+    description:
+      - opaque integer value that can be passed when setting a value.
+    required: false
+    default: None
+  host:
+    description:
+      - host of the consul agent, defaults to localhost
+    required: false
+    default: localhost
+  port:
+    description:
+      - the port on which the consul agent is running
+    required: false
+    default: 8500
+"""
+
+
+EXAMPLES = '''
+
+  - name: add or update the value associated with a key in the key/value store
+    consul_kv:
+      key: somekey
+      value: somevalue
+
+  - name: remove a key from the store
+    consul_kv:
+      key: somekey
+      state: absent
+
+  - name: add a node to an arbitrary group via consul inventory (see consul.ini)
+    consul_kv:
+      key: ansible/groups/dc1/somenode
+      value: 'top_secret'
+'''
+
+import sys
+import urllib2
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+try:
+    import consul
+    from requests.exceptions import ConnectionError
+    python_consul_installed = True
+except ImportError, e:
+    python_consul_installed = False
+
+from requests.exceptions import ConnectionError
+
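# --- Editor's aside (illustrative, not part of the patch) ---
# The cas and acquire/release semantics described above map directly onto
# python-consul's kv.put(). A standalone sketch with made-up keys,
# assuming a local agent on 8500:
import consul

c = consul.Consul(host='localhost', port=8500)

# cas=0 means "only create": the put fails if the key already exists.
c.kv.put('ansible/demo', 'first', cas=0)
# Later writers must supply the key's current ModifyIndex to win.
index, record = c.kv.get('ansible/demo')
c.kv.put('ansible/demo', 'second', cas=record['ModifyIndex'])

# Locking works the same way, keyed to a session.
session = c.session.create(name='demo-lock')
if c.kv.put('ansible/demo/lock', 'holder-1', acquire=session):
    # ... critical section while holding the lock ...
    c.kv.put('ansible/demo/lock', 'holder-1', release=session)
c.session.destroy(session)
# --- end aside ---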
+def execute(module):
+
+    state = module.params.get('state')
+
+    if state == 'acquire' or state == 'release':
+        lock(module, state)
+    elif state == 'present':
+        add_value(module)
+    else:
+        remove_value(module)
+
+
+def lock(module, state):
+
+    consul_api = get_consul_api(module)
+
+    session = module.params.get('session')
+    key = module.params.get('key')
+    value = module.params.get('value')
+
+    if not session:
+        module.fail_json(
+            msg='%s of lock for %s requested but no session supplied' %
+                (state, key))
+
+    if state == 'acquire':
+        successful = consul_api.kv.put(key, value,
+                                       cas=module.params.get('cas'),
+                                       acquire=session,
+                                       flags=module.params.get('flags'))
+    else:
+        successful = consul_api.kv.put(key, value,
+                                       cas=module.params.get('cas'),
+                                       release=session,
+                                       flags=module.params.get('flags'))
+
+    module.exit_json(changed=successful,
+                     key=key)
+
+
+def add_value(module):
+
+    consul_api = get_consul_api(module)
+
+    key = module.params.get('key')
+    value = module.params.get('value')
+
+    index, existing = consul_api.kv.get(key)
+
+    changed = not existing or (existing and existing['Value'] != value)
+    if changed and not module.check_mode:
+        changed = consul_api.kv.put(key, value,
+                                    cas=module.params.get('cas'),
+                                    flags=module.params.get('flags'))
+
+    stored = None
+    if module.params.get('retrieve'):
+        index, stored = consul_api.kv.get(key)
+
+    module.exit_json(changed=changed,
+                     index=index,
+                     key=key,
+                     data=stored)
+
+
+def remove_value(module):
+    ''' remove the value associated with the given key. if the recurse parameter
+    is set then any key prefixed with the given key will be removed. '''
+    consul_api = get_consul_api(module)
+
+    key = module.params.get('key')
+    value = module.params.get('value')
+
+    index, existing = consul_api.kv.get(
+        key, recurse=module.params.get('recurse'))
+
+    changed = existing is not None
+    if changed and not module.check_mode:
+        consul_api.kv.delete(key, module.params.get('recurse'))
+
+    module.exit_json(changed=changed,
+                     index=index,
+                     key=key,
+                     data=existing)
+
+
+def get_consul_api(module, token=None):
+    return consul.Consul(host=module.params.get('host'),
+                         port=module.params.get('port'),
+                         token=module.params.get('token'))
+
+
+def test_dependencies(module):
+    if not python_consul_installed:
+        module.fail_json(msg="python-consul required for this module. "
+                             "see http://python-consul.readthedocs.org/en/latest/#installation")
+
+
+def main():
+
+    argument_spec = dict(
+        cas=dict(required=False),
+        flags=dict(required=False),
+        key=dict(required=True),
+        host=dict(default='localhost'),
+        port=dict(default=8500, type='int'),
+        recurse=dict(required=False, type='bool'),
+        retrieve=dict(required=False, default=True),
+        session=dict(required=False),
+        state=dict(default='present',
+                   choices=['present', 'absent', 'acquire', 'release']),
+        token=dict(required=False, default='anonymous'),
+        value=dict(required=False)
+    )
+
+    module = AnsibleModule(argument_spec, supports_check_mode=False)
+
+    test_dependencies(module)
+
+    try:
+        execute(module)
+    except ConnectionError, e:
+        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+            module.params.get('host'), module.params.get('port'), str(e)))
+    except Exception, e:
+        module.fail_json(msg=str(e))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/clustering/consul_session b/clustering/consul_session
new file mode 100644
index 00000000000..8e6516891d2
--- /dev/null
+++ b/clustering/consul_session
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: consul_session
+short_description: "manipulate consul sessions"
+description:
+  - allows the addition, modification and deletion of sessions in a consul
+    cluster. These sessions can then be used in conjunction with key value
+    pairs to implement distributed locks. In-depth documentation for working
+    with sessions can be found at http://www.consul.io/docs/internals/sessions.html
+requirements:
+  - python-consul
+  - requests
+version_added: "1.9"
+author: Steve Gargan (steve.gargan@gmail.com)
+options:
+  state:
+    description:
+      - whether the session should be present, i.e. created if it doesn't
+        exist, or absent, removed if present. If created, the ID for the
+        session is returned in the output. If absent, the name or ID is
+        required to remove the session. Info for a single session, all the
+        sessions for a node or all available sessions can be retrieved by
+        specifying info, node or list for the state; for node or info, the
+        node name or session id is required as parameter.
+    required: false
+    choices: ['present', 'absent', 'info', 'node', 'list']
+    default: present
+  name:
+    description:
+      - the name that should be associated with the session. This is opaque
+        to Consul and not required.
+    required: false
+    default: None
+  delay:
+    description:
+      - the optional lock delay that can be attached to the session when it
+        is created. Locks for invalidated sessions are blocked from being
+        acquired until this delay has expired. Valid units for delays
+        include 'ns', 'us', 'ms', 's', 'm', 'h'
+    default: 15s
+    required: false
+  node:
+    description:
+      - the name of the node with which the session will be associated;
+        by default this is the name of the agent.
+ required: false + default: None + datacenter: + description: + - name of the datacenter in which the session exists or should be + created. + required: false + default: None + checks: + description: + - a list of checks that will be used to verify the session health. If + all the checks fail, the session will be invalidated and any locks + associated with the session will be release and can be acquired once + the associated lock delay has expired. + required: false + default: None + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + description: + - the port on which the consul agent is running + required: false + default: 8500 +""" + +EXAMPLES = ''' +- name: register basic session with consul + consul_session: + name: session1 + +- name: register a session with an existing check + consul_session: + name: session_with_check + checks: + - existing_check_name + +- name: register a session with lock_delay + consul_session: + name: session_with_delay + delay: 20s + +- name: retrieve info about session by id + consul_session: id=session_id state=info + +- name: retrieve active sessions + consul_session: state=list +''' + +import sys +import urllib2 + +try: + import consul + from requests.exceptions import ConnectionError + python_consul_installed = True +except ImportError, e: + python_consul_installed = False + +def execute(module): + + state = module.params.get('state') + + if state in ['info', 'list', 'node']: + lookup_sessions(module) + elif state == 'present': + update_session(module) + else: + remove_session(module) + +def lookup_sessions(module): + + datacenter = module.params.get('datacenter') + + state = module.params.get('state') + consul = get_consul_api(module) + try: + if state == 'list': + sessions_list = consul.session.list(dc=datacenter) + #ditch the index, this can be grabbed from the results + if sessions_list and sessions_list[1]: + sessions_list = sessions_list[1] + module.exit_json(changed=True, + sessions=sessions_list) + elif state == 'node': + node = module.params.get('node') + if not node: + module.fail_json( + msg="node name is required to retrieve sessions for node") + sessions = consul.session.node(node, dc=datacenter) + module.exit_json(changed=True, + node=node, + sessions=sessions) + elif state == 'info': + session_id = module.params.get('id') + if not session_id: + module.fail_json( + msg="session_id is required to retrieve indvidual session info") + + session_by_id = consul.session.info(session_id, dc=datacenter) + module.exit_json(changed=True, + session_id=session_id, + sessions=session_by_id) + + except Exception, e: + module.fail_json(msg="Could not retrieve session info %s" % e) + + +def update_session(module): + + name = module.params.get('name') + session_id = module.params.get('id') + delay = module.params.get('delay') + checks = module.params.get('checks') + datacenter = module.params.get('datacenter') + node = module.params.get('node') + + consul = get_consul_api(module) + changed = True + + try: + + session = consul.session.create( + name=name, + node=node, + lock_delay=validate_duration('delay', delay), + dc=datacenter, + checks=checks + ) + module.exit_json(changed=True, + session_id=session, + name=name, + delay=delay, + checks=checks, + node=node) + except Exception, e: + module.fail_json(msg="Could not create/update session %s" % e) + + +def remove_session(module): + session_id = module.params.get('id') + + if not session_id: + module.fail_json(msg="""A session id must be 
supplied in order to + remove a session.""") + + consul = get_consul_api(module) + changed = False + + try: + session = consul.session.destroy(session_id) + + module.exit_json(changed=True, + session_id=session_id) + except Exception, e: + module.fail_json(msg="Could not remove session with id '%s' %s" % ( + session_id, e)) + +def validate_duration(name, duration): + if duration: + duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] + if not any((duration.endswith(suffix) for suffix in duration_units)): + raise Exception('Invalid %s %s you must specify units (%s)' % + (name, duration, ', '.join(duration_units))) + return duration + +def get_consul_api(module): + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port')) + +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. "\ + "see http://python-consul.readthedocs.org/en/latest/#installation") + +def main(): + argument_spec = dict( + checks=dict(default=None, required=False, type='list'), + delay=dict(required=False,type='str', default='15s'), + host=dict(default='localhost'), + port=dict(default=8500, type='int'), + id=dict(required=False), + name=dict(required=False), + node=dict(required=False), + state=dict(default='present', + choices=['present', 'absent', 'info', 'node', 'list']) + ) + + module = AnsibleModule(argument_spec, supports_check_mode=False) + + test_dependencies(module) + + try: + execute(module) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index 9af22116245..3a3cf4dfff1 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -57,11 +57,12 @@ options: description: - The name of the database to add/remove the user from required: true - user: + name: description: - The name of the user to add or remove required: true default: null + aliases: [ 'user' ] password: description: - The password to use for the user @@ -178,7 +179,7 @@ def main(): login_port=dict(default='27017'), replica_set=dict(default=None), database=dict(required=True, aliases=['db']), - user=dict(required=True, aliases=['name']), + name=dict(required=True, aliases=['user']), password=dict(aliases=['pass']), ssl=dict(default=False), roles=dict(default=None, type='list'), @@ -195,7 +196,7 @@ def main(): login_port = module.params['login_port'] replica_set = module.params['replica_set'] db_name = module.params['database'] - user = module.params['user'] + user = module.params['name'] password = module.params['password'] ssl = module.params['ssl'] roles = module.params['roles'] diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index b93150a43b5..30811cdc924 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -54,6 +54,12 @@ options: description: - mysql host to connect required: False + login_port: + description: + - Port of the MySQL server. 
Requires login_host be defined as other then localhost if login_port is used + required: False + default: 3306 + version_added: "1.9" login_unix_socket: description: - unix socket to connect mysql server @@ -115,6 +121,9 @@ EXAMPLES = ''' # Change master to master server 192.168.1.1 and use binary log 'mysql-bin.000009' with position 4578 - mysql_replication: mode=changemaster master_host=192.168.1.1 master_log_file=mysql-bin.000009 master_log_pos=4578 + +# Check slave status using port 3308 +- mysql_replication: mode=getslave login_host=ansible.example.com login_port=3308 ''' import ConfigParser @@ -230,17 +239,18 @@ def main(): login_user=dict(default=None), login_password=dict(default=None), login_host=dict(default="localhost"), + login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]), master_host=dict(default=None), master_user=dict(default=None), master_password=dict(default=None), - master_port=dict(default=None), - master_connect_retry=dict(default=None), + master_port=dict(default=None, type='int'), + master_connect_retry=dict(default=None, type='int'), master_log_file=dict(default=None), - master_log_pos=dict(default=None), + master_log_pos=dict(default=None, type='int'), relay_log_file=dict(default=None), - relay_log_pos=dict(default=None), + relay_log_pos=dict(default=None, type='int'), master_ssl=dict(default=False, type='bool'), master_ssl_ca=dict(default=None), master_ssl_capath=dict(default=None), @@ -252,6 +262,7 @@ def main(): user = module.params["login_user"] password = module.params["login_password"] host = module.params["login_host"] + port = module.params["login_port"] mode = module.params["mode"] master_host = module.params["master_host"] master_user = module.params["master_user"] @@ -293,8 +304,10 @@ def main(): try: if module.params["login_unix_socket"]: db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password) + elif module.params["login_port"] != 3306 and module.params["login_host"] == "localhost": + module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined") else: - db_connection = MySQLdb.connect(host=module.params["login_host"], user=login_user, passwd=login_password) + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password) except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") try: @@ -317,7 +330,6 @@ def main(): module.fail_json(msg="Server is not configured as mysql slave") elif mode in "changemaster": - print "Change master" chm=[] chm_params = {} if master_host: @@ -329,22 +341,22 @@ def main(): if master_password: chm.append("MASTER_PASSWORD=%(master_password)s") chm_params['master_password'] = master_password - if master_port: + if master_port is not None: chm.append("MASTER_PORT=%(master_port)s") chm_params['master_port'] = master_port - if master_connect_retry: + if master_connect_retry is not None: chm.append("MASTER_CONNECT_RETRY=%(master_connect_retry)s") chm_params['master_connect_retry'] = master_connect_retry if master_log_file: chm.append("MASTER_LOG_FILE=%(master_log_file)s") chm_params['master_log_file'] = master_log_file - if master_log_pos: + if master_log_pos 
is not None: chm.append("MASTER_LOG_POS=%(master_log_pos)s") chm_params['master_log_pos'] = master_log_pos if relay_log_file: chm.append("RELAY_LOG_FILE=%(relay_log_file)s") chm_params['relay_log_file'] = relay_log_file - if relay_log_pos: + if relay_log_pos is not None: chm.append("RELAY_LOG_POS=%(relay_log_pos)s") chm_params['relay_log_pos'] = relay_log_pos if master_ssl: diff --git a/database/postgresql/__init__.py b/database/postgresql/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/database/postgresql/postgresql_ext.py b/database/postgresql/postgresql_ext.py new file mode 100644 index 00000000000..d70107a4cf9 --- /dev/null +++ b/database/postgresql/postgresql_ext.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: postgresql_ext +short_description: Add or remove PostgreSQL extensions from a database. +description: + - Add or remove PostgreSQL extensions from a database. +version_added: "0.1" +options: + name: + description: + - name of the extension to add or remove + required: true + default: null + db: + description: + - name of the database to add or remove the extension to/from + required: true + default: null + login_user: + description: + - The username used to authenticate with + required: false + default: null + login_password: + description: + - The password used to authenticate with + required: false + default: null + login_host: + description: + - Host running the database + required: false + default: localhost + port: + description: + - Database port to connect to. + required: false + default: 5432 + state: + description: + - The database extension state + required: false + default: present + choices: [ "present", "absent" ] +notes: + - The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host. + - This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on + the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module. +requirements: [ psycopg2 ] +author: Daniel Schep +''' + +EXAMPLES = ''' +# Adds postgis to the database "acme" +- postgresql_ext: name=postgis db=acme +''' + +try: + import psycopg2 + import psycopg2.extras +except ImportError: + postgresqldb_found = False +else: + postgresqldb_found = True + +class NotSupportedError(Exception): + pass + + +# =========================================== +# PostgreSQL module specific support methods. 
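+#
+# The helpers below wrap the pg_extension system catalog: ext_exists()
+# checks whether an extension is already installed, while ext_create()
+# and ext_delete() issue CREATE/DROP EXTENSION only when needed, so the
+# module can report "changed" accurately and stay idempotent.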
+
+def ext_exists(cursor, ext):
+    query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
+    cursor.execute(query, {'ext': ext})
+    return cursor.rowcount == 1
+
+def ext_delete(cursor, ext):
+    if ext_exists(cursor, ext):
+        query = "DROP EXTENSION \"%s\"" % ext
+        cursor.execute(query)
+        return True
+    else:
+        return False
+
+def ext_create(cursor, ext):
+    if not ext_exists(cursor, ext):
+        query = 'CREATE EXTENSION "%s"' % ext
+        cursor.execute(query)
+        return True
+    else:
+        return False
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            login_user=dict(default="postgres"),
+            login_password=dict(default=""),
+            login_host=dict(default=""),
+            port=dict(default="5432"),
+            db=dict(required=True),
+            ext=dict(required=True, aliases=['name']),
+            state=dict(default="present", choices=["absent", "present"]),
+        ),
+        supports_check_mode = True
+    )
+
+    if not postgresqldb_found:
+        module.fail_json(msg="the python psycopg2 module is required")
+
+    db = module.params["db"]
+    ext = module.params["ext"]
+    port = module.params["port"]
+    state = module.params["state"]
+    changed = False
+
+    # To use defaults values, keyword arguments must be absent, so
+    # check which values are empty and don't include in the **kw
+    # dictionary
+    params_map = {
+        "login_host":"host",
+        "login_user":"user",
+        "login_password":"password",
+        "port":"port"
+    }
+    kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
+              if k in params_map and v != '' )
+    try:
+        db_connection = psycopg2.connect(database=db, **kw)
+        # Enable autocommit so CREATE/DROP EXTENSION take effect
+        # immediately, without an explicit commit
+        if psycopg2.__version__ >= '2.4.2':
+            db_connection.autocommit = True
+        else:
+            db_connection.set_isolation_level(psycopg2
+                                              .extensions
+                                              .ISOLATION_LEVEL_AUTOCOMMIT)
+        cursor = db_connection.cursor(
+            cursor_factory=psycopg2.extras.DictCursor)
+    except Exception, e:
+        module.fail_json(msg="unable to connect to database: %s" % e)
+
+    try:
+        if module.check_mode:
+            # report whether a change would be made: absent changes
+            # something only if the extension exists, present only if
+            # it does not exist yet
+            if state == "absent":
+                changed = ext_exists(cursor, ext)
+            elif state == "present":
+                changed = not ext_exists(cursor, ext)
+            module.exit_json(changed=changed, ext=ext)
+
+        if state == "absent":
+            changed = ext_delete(cursor, ext)
+
+        elif state == "present":
+            changed = ext_create(cursor, ext)
+    except NotSupportedError, e:
+        module.fail_json(msg=str(e))
+    except Exception, e:
+        module.fail_json(msg="Database query failed: %s" % e)
+
+    module.exit_json(changed=changed, db=db)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
+
diff --git a/database/postgresql/postgresql_lang.py b/database/postgresql/postgresql_lang.py
new file mode 100644
index 00000000000..ec0507b5508
--- /dev/null
+++ b/database/postgresql/postgresql_lang.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jens Depuydt
+
+DOCUMENTATION = '''
+---
+module: postgresql_lang
+short_description: Adds, removes or changes procedural languages with a PostgreSQL database.
+description:
+    - Adds, removes or changes procedural languages with a PostgreSQL database.
+    - This module allows you to add a language, remove a language or change the trust
+      relationship with a PostgreSQL database. The module can be used on the machine
+      where executed or on a remote host.
+    - When removing a language from a database, it is possible that dependencies prevent
+      the language from being removed. 
In that case, you can specify cascade to
+      automatically drop objects that depend on the language (such as functions in the
+      language). In case the language can't be deleted because it is required by the
+      database system, you can specify fail_on_drop=no to ignore the error.
+    - Be careful when marking a language as trusted since this could be a potential
+      security breach. Untrusted languages allow only users with the PostgreSQL superuser
+      privilege to use this language to create new functions.
+version_added: "1.7"
+options:
+    lang:
+        description:
+            - name of the procedural language to add, remove or change
+        required: true
+        default: null
+    trust:
+        description:
+            - make this language trusted for the selected db
+        required: false
+        default: no
+        choices: [ "yes", "no" ]
+    db:
+        description:
+            - name of the database where the language will be added, removed or changed
+        required: false
+        default: null
+    force_trust:
+        description:
+            - marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
+            - use with care!
+        required: false
+        default: no
+        choices: [ "yes", "no" ]
+    fail_on_drop:
+        description:
+            - if C(yes), fail when removing a language. Otherwise just log and continue.
+            - in some cases, it is not possible to remove a language (used by the db-system).
+              When dependencies block the removal, consider using C(cascade).
+        required: false
+        default: 'yes'
+        choices: [ "yes", "no" ]
+    cascade:
+        description:
+            - when dropping a language, also delete objects that depend on this language.
+            - only used when C(state=absent).
+        required: false
+        default: no
+        choices: [ "yes", "no" ]
+    port:
+        description:
+            - Database port to connect to.
+        required: false
+        default: 5432
+    login_user:
+        description:
+            - User used to authenticate with PostgreSQL
+        required: false
+        default: postgres
+    login_password:
+        description:
+            - Password used to authenticate with PostgreSQL (must match C(login_user))
+        required: false
+        default: null
+    login_host:
+        description:
+            - Host running PostgreSQL where you want to execute the actions.
+        required: false
+        default: localhost
+    state:
+        description:
+            - The state of the language for the selected database
+        required: false
+        default: present
+        choices: [ "present", "absent" ]
+notes:
+    - The default authentication assumes that you are either logging in as or
+      sudo'ing to the postgres account on the host.
+    - This module uses psycopg2, a Python PostgreSQL database adapter. You must
+      ensure that psycopg2 is installed on the host before using this module. If
+      the remote host is the PostgreSQL server (which is the default case), then
+      PostgreSQL must also be installed on the remote host. For Ubuntu-based
+      systems, install the postgresql, libpq-dev, and python-psycopg2 packages
+      on the remote host before using this module.
+requirements: [ psycopg2 ] +author: Jens Depuydt +''' + +EXAMPLES = ''' +# Add language pltclu to database testdb if it doesn't exist: +- postgresql_lang db=testdb lang=pltclu state=present + +# Add language pltclu to database testdb if it doesn't exist and mark it as trusted: +# Marks the language as trusted if it exists but isn't trusted yet +# force_trust makes sure that the language will be marked as trusted +- postgresql_lang db=testdb lang=pltclu state=present trust=yes force_trust=yes + +# Remove language pltclu from database testdb: +- postgresql_lang: db=testdb lang=pltclu state=absent + +# Remove language pltclu from database testdb and remove all dependencies: +- postgresql_lang: db=testdb lang=pltclu state=absent cascade=yes + +# Remove language c from database testdb but ignore errors if something prevents the removal: +- postgresql_lang: db=testdb lang=pltclu state=absent fail_on_drop=no +''' + +try: + import psycopg2 +except ImportError: + postgresqldb_found = False +else: + postgresqldb_found = True + +def lang_exists(cursor, lang): + """Checks if language exists for db""" + query = "SELECT lanname FROM pg_language WHERE lanname='%s'" % lang + cursor.execute(query) + return cursor.rowcount > 0 + +def lang_istrusted(cursor, lang): + """Checks if language is trusted for db""" + query = "SELECT lanpltrusted FROM pg_language WHERE lanname='%s'" % lang + cursor.execute(query) + return cursor.fetchone()[0] + +def lang_altertrust(cursor, lang, trust): + """Changes if language is trusted for db""" + query = "UPDATE pg_language SET lanpltrusted = %s WHERE lanname=%s" + cursor.execute(query, (trust, lang)) + return True + +def lang_add(cursor, lang, trust): + """Adds language for db""" + if trust: + query = 'CREATE TRUSTED LANGUAGE "%s"' % lang + else: + query = 'CREATE LANGUAGE "%s"' % lang + cursor.execute(query) + return True + +def lang_drop(cursor, lang, cascade): + """Drops language for db""" + cursor.execute("SAVEPOINT ansible_pgsql_lang_drop") + try: + if cascade: + cursor.execute("DROP LANGUAGE \"%s\" CASCADE" % lang) + else: + cursor.execute("DROP LANGUAGE \"%s\"" % lang) + except: + cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop") + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop") + return False + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop") + return True + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default="postgres"), + login_password=dict(default=""), + login_host=dict(default=""), + db=dict(required=True), + port=dict(default='5432'), + lang=dict(required=True), + state=dict(default="present", choices=["absent", "present"]), + trust=dict(type='bool', default='no'), + force_trust=dict(type='bool', default='no'), + cascade=dict(type='bool', default='no'), + fail_on_drop=dict(type='bool', default='yes'), + ), + supports_check_mode = True + ) + + db = module.params["db"] + port = module.params["port"] + lang = module.params["lang"] + state = module.params["state"] + trust = module.params["trust"] + force_trust = module.params["force_trust"] + cascade = module.params["cascade"] + fail_on_drop = module.params["fail_on_drop"] + + if not postgresqldb_found: + module.fail_json(msg="the python psycopg2 module is required") + + params_map = { + "login_host":"host", + "login_user":"user", + "login_password":"password", + "port":"port", + "db":"database" + } + kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() + if k in params_map and v != "" ) + try: + db_connection = 
psycopg2.connect(**kw) + cursor = db_connection.cursor() + except Exception, e: + module.fail_json(msg="unable to connect to database: %s" % e) + changed = False + lang_dropped = False + kw = dict(db=db,lang=lang,trust=trust) + + if state == "present": + if lang_exists(cursor, lang): + lang_trusted = lang_istrusted(cursor, lang) + if (lang_trusted and not trust) or (not lang_trusted and trust): + if module.check_mode: + changed = True + else: + changed = lang_altertrust(cursor, lang, trust) + else: + if module.check_mode: + changed = True + else: + changed = lang_add(cursor, lang, trust) + if force_trust: + changed = lang_altertrust(cursor, lang, trust) + + else: + if lang_exists(cursor, lang): + if module.check_mode: + changed = True + kw['lang_dropped'] = True + else: + changed = lang_drop(cursor, lang, cascade) + if fail_on_drop and not changed: + msg = "unable to drop language, use cascade to delete dependencies or fail_on_drop=no to ignore" + module.fail_json(msg=msg) + kw['lang_dropped'] = changed + + if changed: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + kw['changed'] = changed + module.exit_json(**kw) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/database/vertica/__init__.py b/database/vertica/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/database/vertica/vertica_configuration.py b/database/vertica/vertica_configuration.py new file mode 100644 index 00000000000..ad74c0f23f2 --- /dev/null +++ b/database/vertica/vertica_configuration.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_configuration +version_added: '2.0' +short_description: Updates Vertica configuration parameters. +description: + - Updates Vertica configuration parameters. +options: + name: + description: + - Name of the parameter to update. + required: true + value: + description: + - Value of the parameter to be set. + required: true + db: + description: + - Name of the Vertica database. + required: false + default: null + cluster: + description: + - Name of the Vertica cluster. + required: false + default: localhost + port: + description: + - Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + - The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + - The password used to authenticate with. + required: false + default: null +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. 
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +- name: updating load_balance_policy + vertica_configuration: name=failovertostandbyafter value='8 hours' +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_configuration_facts(cursor, parameter_name=''): + facts = {} + cursor.execute(""" + select c.parameter_name, c.current_value, c.default_value + from configuration_parameters c + where c.node_name = 'ALL' + and (? = '' or c.parameter_name ilike ?) + """, parameter_name, parameter_name) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.parameter_name.lower()] = { + 'parameter_name': row.parameter_name, + 'current_value': row.current_value, + 'default_value': row.default_value} + return facts + +def check(configuration_facts, parameter_name, current_value): + parameter_key = parameter_name.lower() + if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): + return False + return True + +def present(configuration_facts, cursor, parameter_name, current_value): + parameter_key = parameter_name.lower() + changed = False + if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): + cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value)) + changed = True + if changed: + configuration_facts.update(get_configuration_facts(cursor, parameter_name)) + return changed + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + parameter=dict(required=True, aliases=['name']), + value=dict(default=None), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + parameter_name = module.params['parameter'] + current_value = module.params['value'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + configuration_facts = get_configuration_facts(cursor) + if module.check_mode: + changed = not check(configuration_facts, parameter_name, current_value) + else: + try: + changed = present(configuration_facts, cursor, parameter_name, current_value) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), 
ansible_facts={'vertica_configuration': configuration_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_facts.py b/database/vertica/vertica_facts.py new file mode 100644 index 00000000000..b7e0ac4ad5a --- /dev/null +++ b/database/vertica/vertica_facts.py @@ -0,0 +1,276 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_facts +version_added: '2.0' +short_description: Gathers Vertica database facts. +description: + - Gathers Vertica database facts. +options: + cluster: + description: + - Name of the cluster running the schema. + required: false + default: localhost + port: + description: + Database port to connect to. + required: false + default: 5433 + db: + description: + - Name of the database running the schema. + required: false + default: null + login_user: + description: + - The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + - The password used to authenticate with. + required: false + default: null +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +- name: gathering vertica facts + vertica_facts: db=db_name +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +# module specific functions + +def get_schema_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select schema_name, schema_owner, create_time + from schemata + where not is_system_schema and schema_name not in ('public') + and (? = '' or schema_name ilike ?) 
+ """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.schema_name.lower()] = { + 'name': row.schema_name, + 'owner': row.schema_owner, + 'create_time': str(row.create_time), + 'usage_roles': [], + 'create_roles': []} + cursor.execute(""" + select g.object_name as schema_name, r.name as role_name, + lower(g.privileges_description) privileges_description + from roles r join grants g + on g.grantee = r.name and g.object_type='SCHEMA' + and g.privileges_description like '%USAGE%' + and g.grantee not in ('public', 'dbadmin') + and (? = '' or g.object_name ilike ?) + """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + schema_key = row.schema_name.lower() + if 'create' in row.privileges_description: + facts[schema_key]['create_roles'].append(row.role_name) + else: + facts[schema_key]['usage_roles'].append(row.role_name) + return facts + +def get_user_facts(cursor, user=''): + facts = {} + cursor.execute(""" + select u.user_name, u.is_locked, u.lock_time, + p.password, p.acctexpired as is_expired, + u.profile_name, u.resource_pool, + u.all_roles, u.default_roles + from users u join password_auditor p on p.user_id = u.user_id + where not u.is_super_user + and (? = '' or u.user_name ilike ?) + """, user, user) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + user_key = row.user_name.lower() + facts[user_key] = { + 'name': row.user_name, + 'locked': str(row.is_locked), + 'password': row.password, + 'expired': str(row.is_expired), + 'profile': row.profile_name, + 'resource_pool': row.resource_pool, + 'roles': [], + 'default_roles': []} + if row.is_locked: + facts[user_key]['locked_time'] = str(row.lock_time) + if row.all_roles: + facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + if row.default_roles: + facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + return facts + +def get_role_facts(cursor, role=''): + facts = {} + cursor.execute(""" + select r.name, r.assigned_roles + from roles r + where (? = '' or r.name ilike ?) + """, role, role) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + role_key = row.name.lower() + facts[role_key] = { + 'name': row.name, + 'assigned_roles': []} + if row.assigned_roles: + facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + return facts + +def get_configuration_facts(cursor, parameter=''): + facts = {} + cursor.execute(""" + select c.parameter_name, c.current_value, c.default_value + from configuration_parameters c + where c.node_name = 'ALL' + and (? = '' or c.parameter_name ilike ?) 
+ """, parameter, parameter) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.parameter_name.lower()] = { + 'parameter_name': row.parameter_name, + 'current_value': row.current_value, + 'default_value': row.default_value} + return facts + +def get_node_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select node_name, node_address, export_address, node_state, node_type, + catalog_path + from nodes + """) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.node_address] = { + 'node_name': row.node_name, + 'export_address': row.export_address, + 'node_state': row.node_state, + 'node_type': row.node_type, + 'catalog_path': row.catalog_path} + return facts + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + cluster=dict(default='localhost'), + port=dict(default='5433'), + db=dict(default=None), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + schema_facts = get_schema_facts(cursor) + user_facts = get_user_facts(cursor) + role_facts = get_role_facts(cursor) + configuration_facts = get_configuration_facts(cursor) + node_facts = get_node_facts(cursor) + module.exit_json(changed=False, + ansible_facts={'vertica_schemas': schema_facts, + 'vertica_users': user_facts, + 'vertica_roles': role_facts, + 'vertica_configuration': configuration_facts, + 'vertica_nodes': node_facts}) + except NotSupportedError, e: + module.fail_json(msg=str(e)) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_role.py b/database/vertica/vertica_role.py new file mode 100644 index 00000000000..ef56a58a866 --- /dev/null +++ b/database/vertica/vertica_role.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_role +version_added: '2.0' +short_description: Adds or removes Vertica database roles and assigns roles to them. +description: + - Adds or removes Vertica database role and, optionally, assign other roles. 
+options: + name: + description: + - Name of the role to add or remove. + required: true + assigned_roles: + description: + - Comma separated list of roles to assign to the role. + aliases: ['assigned_role'] + required: false + default: null + state: + description: + - Whether to create C(present), drop C(absent) or lock C(locked) a role. + required: false + choices: ['present', 'absent'] + default: present + db: + description: + - Name of the Vertica database. + required: false + default: null + cluster: + description: + - Name of the Vertica cluster. + required: false + default: localhost + port: + description: + - Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + - The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + - The password used to authenticate with. + required: false + default: null +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +- name: creating a new vertica role + vertica_role: name=role_name db=db_name state=present + +- name: creating a new vertica role with other role assigned + vertica_role: name=role_name assigned_role=other_role_name state=present +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_role_facts(cursor, role=''): + facts = {} + cursor.execute(""" + select r.name, r.assigned_roles + from roles r + where (? = '' or r.name ilike ?) 
+ """, role, role) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + role_key = row.name.lower() + facts[role_key] = { + 'name': row.name, + 'assigned_roles': []} + if row.assigned_roles: + facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + return facts + +def update_roles(role_facts, cursor, role, + existing, required): + for assigned_role in set(existing) - set(required): + cursor.execute("revoke {0} from {1}".format(assigned_role, role)) + for assigned_role in set(required) - set(existing): + cursor.execute("grant {0} to {1}".format(assigned_role, role)) + +def check(role_facts, role, assigned_roles): + role_key = role.lower() + if role_key not in role_facts: + return False + if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0: + return False + return True + +def present(role_facts, cursor, role, assigned_roles): + role_key = role.lower() + if role_key not in role_facts: + cursor.execute("create role {0}".format(role)) + update_roles(role_facts, cursor, role, [], assigned_roles) + role_facts.update(get_role_facts(cursor, role)) + return True + else: + changed = False + if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0: + update_roles(role_facts, cursor, role, + role_facts[role_key]['assigned_roles'], assigned_roles) + changed = True + if changed: + role_facts.update(get_role_facts(cursor, role)) + return changed + +def absent(role_facts, cursor, role, assigned_roles): + role_key = role.lower() + if role_key in role_facts: + update_roles(role_facts, cursor, role, + role_facts[role_key]['assigned_roles'], []) + cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name'])) + del role_facts[role_key] + return True + else: + return False + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + role=dict(required=True, aliases=['name']), + assigned_roles=dict(default=None, aliases=['assigned_role']), + state=dict(default='present', choices=['absent', 'present']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + role = module.params['role'] + assigned_roles = [] + if module.params['assigned_roles']: + assigned_roles = module.params['assigned_roles'].split(',') + assigned_roles = filter(None, assigned_roles) + state = module.params['state'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + role_facts = get_role_facts(cursor) + if module.check_mode: + changed = not check(role_facts, role, assigned_roles) + elif state == 'absent': + try: + changed = absent(role_facts, cursor, role, assigned_roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + elif state == 'present': + try: + changed = present(role_facts, cursor, role, assigned_roles) + 
except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_schema.py b/database/vertica/vertica_schema.py new file mode 100644 index 00000000000..d0ed2ce05b0 --- /dev/null +++ b/database/vertica/vertica_schema.py @@ -0,0 +1,317 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_schema +version_added: '2.0' +short_description: Adds or removes Vertica database schema and roles. +description: + - Adds or removes Vertica database schema and, optionally, roles + with schema access privileges. + - A schema will not be removed until all the objects have been dropped. + - In such a situation, if the module tries to remove the schema it + will fail and only remove roles created for the schema if they have + no dependencies. +options: + name: + description: + - Name of the schema to add or remove. + required: true + usage_roles: + description: + - Comma separated list of roles to create and grant usage access to the schema. + aliases: ['usage_role'] + required: false + default: null + create_roles: + description: + - Comma separated list of roles to create and grant usage and create access to the schema. + aliases: ['create_role'] + required: false + default: null + owner: + description: + - Name of the user to set as owner of the schema. + required: false + default: null + state: + description: + - Whether to create C(present), or drop C(absent) a schema. + required: false + default: present + choices: ['present', 'absent'] + db: + description: + - Name of the Vertica database. + required: false + default: null + cluster: + description: + - Name of the Vertica cluster. + required: false + default: localhost + port: + description: + - Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + - The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + - The password used to authenticate with. + required: false + default: null +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. 
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +- name: creating a new vertica schema + vertica_schema: name=schema_name db=db_name state=present + +- name: creating a new schema with specific schema owner + vertica_schema: name=schema_name owner=dbowner db=db_name state=present + +- name: creating a new schema with roles + vertica_schema: + name=schema_name + create_roles=schema_name_all + usage_roles=schema_name_ro,schema_name_rw + db=db_name + state=present +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_schema_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select schema_name, schema_owner, create_time + from schemata + where not is_system_schema and schema_name not in ('public', 'TxtIndex') + and (? = '' or schema_name ilike ?) + """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.schema_name.lower()] = { + 'name': row.schema_name, + 'owner': row.schema_owner, + 'create_time': str(row.create_time), + 'usage_roles': [], + 'create_roles': []} + cursor.execute(""" + select g.object_name as schema_name, r.name as role_name, + lower(g.privileges_description) privileges_description + from roles r join grants g + on g.grantee_id = r.role_id and g.object_type='SCHEMA' + and g.privileges_description like '%USAGE%' + and g.grantee not in ('public', 'dbadmin') + and (? = '' or g.object_name ilike ?) 
+ """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + schema_key = row.schema_name.lower() + if 'create' in row.privileges_description: + facts[schema_key]['create_roles'].append(row.role_name) + else: + facts[schema_key]['usage_roles'].append(row.role_name) + return facts + +def update_roles(schema_facts, cursor, schema, + existing, required, + create_existing, create_required): + for role in set(existing + create_existing) - set(required + create_required): + cursor.execute("drop role {0} cascade".format(role)) + for role in set(create_existing) - set(create_required): + cursor.execute("revoke create on schema {0} from {1}".format(schema, role)) + for role in set(required + create_required) - set(existing + create_existing): + cursor.execute("create role {0}".format(role)) + cursor.execute("grant usage on schema {0} to {1}".format(schema, role)) + for role in set(create_required) - set(create_existing): + cursor.execute("grant create on schema {0} to {1}".format(schema, role)) + +def check(schema_facts, schema, usage_roles, create_roles, owner): + schema_key = schema.lower() + if schema_key not in schema_facts: + return False + if owner and owner.lower() == schema_facts[schema_key]['owner'].lower(): + return False + if cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0: + return False + if cmp(sorted(create_roles), sorted(schema_facts[schema_key]['create_roles'])) != 0: + return False + return True + +def present(schema_facts, cursor, schema, usage_roles, create_roles, owner): + schema_key = schema.lower() + if schema_key not in schema_facts: + query_fragments = ["create schema {0}".format(schema)] + if owner: + query_fragments.append("authorization {0}".format(owner)) + cursor.execute(' '.join(query_fragments)) + update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles) + schema_facts.update(get_schema_facts(cursor, schema)) + return True + else: + changed = False + if owner and owner.lower() != schema_facts[schema_key]['owner'].lower(): + raise NotSupportedError(( + "Changing schema owner is not supported. " + "Current owner: {0}." 
+ ).format(schema_facts[schema_key]['owner'])) + if cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0 or \ + cmp(sorted(create_roles), sorted(schema_facts[schema_key]['create_roles'])) != 0: + update_roles(schema_facts, cursor, schema, + schema_facts[schema_key]['usage_roles'], usage_roles, + schema_facts[schema_key]['create_roles'], create_roles) + changed = True + if changed: + schema_facts.update(get_schema_facts(cursor, schema)) + return changed + +def absent(schema_facts, cursor, schema, usage_roles, create_roles): + schema_key = schema.lower() + if schema_key in schema_facts: + update_roles(schema_facts, cursor, schema, + schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], []) + try: + cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name'])) + except pyodbc.Error: + raise CannotDropError("Dropping schema failed due to dependencies.") + del schema_facts[schema_key] + return True + else: + return False + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + schema=dict(required=True, aliases=['name']), + usage_roles=dict(default=None, aliases=['usage_role']), + create_roles=dict(default=None, aliases=['create_role']), + owner=dict(default=None), + state=dict(default='present', choices=['absent', 'present']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + schema = module.params['schema'] + usage_roles = [] + if module.params['usage_roles']: + usage_roles = module.params['usage_roles'].split(',') + usage_roles = filter(None, usage_roles) + create_roles = [] + if module.params['create_roles']: + create_roles = module.params['create_roles'].split(',') + create_roles = filter(None, create_roles) + owner = module.params['owner'] + state = module.params['state'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + schema_facts = get_schema_facts(cursor) + if module.check_mode: + changed = not check(schema_facts, schema, usage_roles, create_roles, owner) + elif state == 'absent': + try: + changed = absent(schema_facts, cursor, schema, usage_roles, create_roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + elif state == 'present': + try: + changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts}) + +# import ansible utilities 
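+# (the wildcard import below provides AnsibleModule used throughout this
+# module; main() only runs when the file is executed directly)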
+from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_user.py b/database/vertica/vertica_user.py new file mode 100644 index 00000000000..a011bf35adb --- /dev/null +++ b/database/vertica/vertica_user.py @@ -0,0 +1,385 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_user +version_added: '2.0' +short_description: Adds or removes Vertica database users and assigns roles. +description: + - Adds or removes Vertica database user and, optionally, assigns roles. + - A user will not be removed until all the dependencies have been dropped. + - In such a situation, if the module tries to remove the user it + will fail and only remove roles granted to the user. +options: + name: + description: + - Name of the user to add or remove. + required: true + profile: + description: + - Sets the user's profile. + required: false + default: null + resource_pool: + description: + - Sets the user's resource pool. + required: false + default: null + password: + description: + - The user's password encrypted by the MD5 algorithm. + - The password must be generated with the format C("md5" + md5[password + username]), + resulting in a total of 35 characters. An easy way to do this is by querying + the Vertica database with select 'md5'||md5(''). + required: false + default: null + expired: + description: + - Sets the user's password expiration. + required: false + default: null + ldap: + description: + - Set to true if users are authenticated via LDAP. + - The user will be created with password expired and set to I($ldap$). + required: false + default: null + roles: + description: + - Comma separated list of roles to assign to the user. + aliases: ['role'] + required: false + default: null + state: + description: + - Whether to create C(present), drop C(absent) or lock C(locked) a user. + required: false + choices: ['present', 'absent', 'locked'] + default: present + db: + description: + - Name of the Vertica database. + required: false + default: null + cluster: + description: + - Name of the Vertica cluster. + required: false + default: localhost + port: + description: + - Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + - The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + - The password used to authenticate with. + required: false + default: null +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. 
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +- name: creating a new vertica user with password + vertica_user: name=user_name password=md5 db=db_name state=present + +- name: creating a new vertica user authenticated via ldap with roles assigned + vertica_user: + name=user_name + ldap=true + db=db_name + roles=schema_name_ro + state=present +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_user_facts(cursor, user=''): + facts = {} + cursor.execute(""" + select u.user_name, u.is_locked, u.lock_time, + p.password, p.acctexpired as is_expired, + u.profile_name, u.resource_pool, + u.all_roles, u.default_roles + from users u join password_auditor p on p.user_id = u.user_id + where not u.is_super_user + and (? = '' or u.user_name ilike ?) + """, user, user) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + user_key = row.user_name.lower() + facts[user_key] = { + 'name': row.user_name, + 'locked': str(row.is_locked), + 'password': row.password, + 'expired': str(row.is_expired), + 'profile': row.profile_name, + 'resource_pool': row.resource_pool, + 'roles': [], + 'default_roles': []} + if row.is_locked: + facts[user_key]['locked_time'] = str(row.lock_time) + if row.all_roles: + facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + if row.default_roles: + facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + return facts + +def update_roles(user_facts, cursor, user, + existing_all, existing_default, required): + del_roles = list(set(existing_all) - set(required)) + if del_roles: + cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user)) + new_roles = list(set(required) - set(existing_all)) + if new_roles: + cursor.execute("grant {0} to {1}".format(','.join(new_roles), user)) + if required: + cursor.execute("alter user {0} default role {1}".format(user, ','.join(required))) + +def check(user_facts, user, profile, resource_pool, + locked, password, expired, ldap, roles): + user_key = user.lower() + if user_key not in user_facts: + return False + if profile and profile != user_facts[user_key]['profile']: + return False + if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + return False + if locked != (user_facts[user_key]['locked'] == 'True'): + return False + if password and password != user_facts[user_key]['password']: + return False + if expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or \ + ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True'): + return False + if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \ + cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0): + return False + return True + +def present(user_facts, cursor, user, profile, resource_pool, + locked, password, expired, ldap, roles): + user_key = user.lower() + if user_key not in user_facts: + query_fragments = 
["create user {0}".format(user)] + if locked: + query_fragments.append("account lock") + if password or ldap: + if password: + query_fragments.append("identified by '{0}'".format(password)) + else: + query_fragments.append("identified by '$ldap$'") + if expired or ldap: + query_fragments.append("password expire") + if profile: + query_fragments.append("profile {0}".format(profile)) + if resource_pool: + query_fragments.append("resource pool {0}".format(resource_pool)) + cursor.execute(' '.join(query_fragments)) + if resource_pool and resource_pool != 'general': + cursor.execute("grant usage on resource pool {0} to {1}".format( + resource_pool, user)) + update_roles(user_facts, cursor, user, [], [], roles) + user_facts.update(get_user_facts(cursor, user)) + return True + else: + changed = False + query_fragments = ["alter user {0}".format(user)] + if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'): + state = 'lock' if locked else 'unlock' + query_fragments.append("account {0}".format(state)) + changed = True + if password and password != user_facts[user_key]['password']: + query_fragments.append("identified by '{0}'".format(password)) + changed = True + if ldap: + if ldap != (user_facts[user_key]['expired'] == 'True'): + query_fragments.append("password expire") + changed = True + elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'): + if expired: + query_fragments.append("password expire") + changed = True + else: + raise NotSupportedError("Unexpiring user password is not supported.") + if profile and profile != user_facts[user_key]['profile']: + query_fragments.append("profile {0}".format(profile)) + changed = True + if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + query_fragments.append("resource pool {0}".format(resource_pool)) + if user_facts[user_key]['resource_pool'] != 'general': + cursor.execute("revoke usage on resource pool {0} from {1}".format( + user_facts[user_key]['resource_pool'], user)) + if resource_pool != 'general': + cursor.execute("grant usage on resource pool {0} to {1}".format( + resource_pool, user)) + changed = True + if changed: + cursor.execute(' '.join(query_fragments)) + if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \ + cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0): + update_roles(user_facts, cursor, user, + user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles) + changed = True + if changed: + user_facts.update(get_user_facts(cursor, user)) + return changed + +def absent(user_facts, cursor, user, roles): + user_key = user.lower() + if user_key in user_facts: + update_roles(user_facts, cursor, user, + user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], []) + try: + cursor.execute("drop user {0}".format(user_facts[user_key]['name'])) + except pyodbc.Error: + raise CannotDropError("Dropping user failed due to dependencies.") + del user_facts[user_key] + return True + else: + return False + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + user=dict(required=True, aliases=['name']), + profile=dict(default=None), + resource_pool=dict(default=None), + password=dict(default=None), + expired=dict(type='bool', default=None), + ldap=dict(type='bool', default=None), + roles=dict(default=None, aliases=['role']), + state=dict(default='present', choices=['absent', 'present', 'locked']), + db=dict(default=None), + cluster=dict(default='localhost'), + 
port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + user = module.params['user'] + profile = module.params['profile'] + if profile: + profile = profile.lower() + resource_pool = module.params['resource_pool'] + if resource_pool: + resource_pool = resource_pool.lower() + password = module.params['password'] + expired = module.params['expired'] + ldap = module.params['ldap'] + roles = [] + if module.params['roles']: + roles = module.params['roles'].split(',') + roles = filter(None, roles) + state = module.params['state'] + if state == 'locked': + locked = True + else: + locked = False + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + user_facts = get_user_facts(cursor) + if module.check_mode: + changed = not check(user_facts, user, profile, resource_pool, + locked, password, expired, ldap, roles) + elif state == 'absent': + try: + changed = absent(user_facts, cursor, user, roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + elif state in ['present', 'locked']: + try: + changed = present(user_facts, cursor, user, profile, resource_pool, + locked, password, expired, ldap, roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/files/__init__.py b/files/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/files/patch.py b/files/patch.py new file mode 100755 index 00000000000..ec3a3b02c00 --- /dev/null +++ b/files/patch.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Luis Alberto Perez Lazaro +# (c) 2015, Jakub Jirutka +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: patch +author: Luis Alberto Perez Lazaro, Jakub Jirutka +version_added: 1.9 +description: + - Apply patch files using the GNU patch tool. 
+short_description: Apply patch files using the GNU patch tool.
+options:
+  basedir:
+    description:
+      - Path of a base directory in which the patch file will be applied.
+        May be omitted when the C(dest) option is specified, otherwise required.
+    required: false
+  dest:
+    description:
+      - Path of the file on the remote machine to be patched.
+      - The names of the files to be patched are usually taken from the patch
+        file, but if there's just one file to be patched it can be specified with
+        this option.
+    required: false
+    aliases: [ "originalfile" ]
+  src:
+    description:
+      - Path of the patch file as accepted by the GNU patch tool. If
+        C(remote_src) is False, the patch source file is looked up from the
+        module's "files" directory.
+    required: true
+    aliases: [ "patchfile" ]
+  remote_src:
+    description:
+      - If False, it will search for src on the originating/master machine; if True, it will
+        look for the src on the remote/target machine. Default is False.
+    choices: [ "True", "False" ]
+    required: false
+    default: "False"
+  strip:
+    description:
+      - Number that indicates the smallest prefix containing leading slashes
+        that will be stripped from each file name found in the patch file.
+        For more information, see the strip parameter of the GNU patch tool.
+    required: false
+    type: "int"
+    default: "0"
+notes:
+  - This module requires the GNU I(patch) utility to be installed on the remote host.
+'''
+
+EXAMPLES = '''
+- name: apply patch to one file
+  patch: >
+    src=/tmp/index.html.patch
+    dest=/var/www/index.html
+
+- name: apply patch to multiple files under basedir
+  patch: >
+    src=/tmp/customize.patch
+    basedir=/var/www
+    strip=1
+'''
+
+import os
+from os import path, R_OK, W_OK
+
+
+class PatchError(Exception):
+    pass
+
+
+def is_already_applied(patch_func, patch_file, basedir, dest_file=None, strip=0):
+    opts = ['--quiet', '--reverse', '--forward', '--dry-run',
+            "--strip=%s" % strip, "--directory='%s'" % basedir,
+            "--input='%s'" % patch_file]
+    if dest_file:
+        opts.append("'%s'" % dest_file)
+
+    (rc, _, _) = patch_func(opts)
+    return rc == 0
+
+
+def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False):
+    opts = ['--quiet', '--forward', '--batch', '--reject-file=-',
+            "--strip=%s" % strip, "--directory='%s'" % basedir,
+            "--input='%s'" % patch_file]
+    if dry_run:
+        opts.append('--dry-run')
+    if dest_file:
+        opts.append("'%s'" % dest_file)
+
+    (rc, out, err) = patch_func(opts)
+    if rc != 0:
+        msg = out if not err else err
+        raise PatchError(msg)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec={
+            'src': {'required': True, 'aliases': ['patchfile']},
+            'dest': {'aliases': ['originalfile']},
+            'basedir': {},
+            'strip': {'default': 0, 'type': 'int'},
+            'remote_src': {'default': False, 'type': 'bool'},
+        },
+        required_one_of=[['dest', 'basedir']],
+        supports_check_mode=True
+    )
+
+    # Create type object as namespace for module params
+    p = type('Params', (), module.params)
+
+    p.src = os.path.expanduser(p.src)
+    if not os.access(p.src, R_OK):
+        module.fail_json(msg="src %s doesn't exist or is not readable" % (p.src))
+
+    if p.dest and not os.access(p.dest, W_OK):
+        module.fail_json(msg="dest %s doesn't exist or is not writable" % (p.dest))
+
+    if p.basedir and not path.exists(p.basedir):
+        module.fail_json(msg="basedir %s doesn't exist" % (p.basedir))
+
+    if not p.basedir:
+        p.basedir = path.dirname(p.dest)
+
+    patch_bin = module.get_bin_path('patch')
+    if patch_bin is None:
+        module.fail_json(msg="patch command not found")
+    patch_func = lambda opts: 
module.run_command("%s %s" % (patch_bin, ' '.join(opts))) + + # patch need an absolute file name + p.src = os.path.abspath(p.src) + + changed = False + if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip): + try: + apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip, + dry_run=module.check_mode) + changed = True + except PatchError, e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=changed) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/messaging/rabbitmq_user.py b/messaging/rabbitmq_user.py index 1cbee360dff..f494ce802d9 100644 --- a/messaging/rabbitmq_user.py +++ b/messaging/rabbitmq_user.py @@ -162,7 +162,11 @@ class RabbitMqUser(object): return dict() def add(self): - self._exec(['add_user', self.username, self.password]) + if self.password is not None: + self._exec(['add_user', self.username, self.password]) + else: + self._exec(['add_user', self.username, '']) + self._exec(['clear_password', self.username]) def delete(self): self._exec(['delete_user', self.username]) diff --git a/monitoring/logentries.py b/monitoring/logentries.py index 373f4f777ff..a19885ea702 100644 --- a/monitoring/logentries.py +++ b/monitoring/logentries.py @@ -35,11 +35,20 @@ options: choices: [ 'present', 'absent' ] required: false default: present + name: + description: + - name of the log + required: false + logtype: + description: + - type of the log + required: false + notes: - Requires the LogEntries agent which can be installed following the instructions at logentries.com ''' EXAMPLES = ''' -- logentries: path=/var/log/nginx/access.log state=present +- logentries: path=/var/log/nginx/access.log state=present name=nginx-access-log - logentries: path=/var/log/nginx/error.log state=absent ''' @@ -53,7 +62,7 @@ def query_log_status(module, le_path, path, state="present"): return False -def follow_log(module, le_path, logs): +def follow_log(module, le_path, logs, name=None, logtype=None): """ Follows one or more logs if not already followed. 
""" followed_count = 0 @@ -64,7 +73,13 @@ def follow_log(module, le_path, logs): if module.check_mode: module.exit_json(changed=True) - rc, out, err = module.run_command([le_path, 'follow', log]) + + cmd = [le_path, 'follow', log] + if name: + cmd.extend(['--name',name]) + if logtype: + cmd.extend(['--type',logtype]) + rc, out, err = module.run_command(' '.join(cmd)) if not query_log_status(module, le_path, log): module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip())) @@ -104,8 +119,10 @@ def unfollow_log(module, le_path, logs): def main(): module = AnsibleModule( argument_spec = dict( - path = dict(aliases=["name"], required=True), - state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]) + path = dict(required=True), + state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]), + name = dict(required=False, default=None, type='str'), + logtype = dict(required=False, default=None, type='str', aliases=['type']) ), supports_check_mode=True ) @@ -119,7 +136,7 @@ def main(): logs = filter(None, logs) if p["state"] in ["present", "followed"]: - follow_log(module, le_path, logs) + follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype']) elif p["state"] in ["absent", "unfollowed"]: unfollow_log(module, le_path, logs) diff --git a/monitoring/monit.py b/monitoring/monit.py index 558f1e696f2..8772d22b2d8 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -75,8 +75,8 @@ def main(): # Sample output lines: # Process 'name' Running # Process 'name' Running - restart pending - parts = line.lower().split() - if len(parts) > 2 and parts[0] == 'process' and parts[1] == "'%s'" % name: + parts = line.split() + if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name: return ' '.join(parts[2:]) else: return '' diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 9219766b86a..c564e712b04 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -364,7 +364,7 @@ class Nagios(object): return notif_str - def schedule_svc_downtime(self, host, services=[], minutes=30): + def schedule_svc_downtime(self, host, services=None, minutes=30): """ This command is used to schedule downtime for a particular service. @@ -378,6 +378,10 @@ class Nagios(object): """ cmd = "SCHEDULE_SVC_DOWNTIME" + + if services is None: + services = [] + for service in services: dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service) self._write_command(dt_cmd_str) @@ -518,7 +522,7 @@ class Nagios(object): notif_str = self._fmt_notif_str(cmd, host) self._write_command(notif_str) - def disable_svc_notifications(self, host, services=[]): + def disable_svc_notifications(self, host, services=None): """ This command is used to prevent notifications from being sent out for the specified service. @@ -530,6 +534,10 @@ class Nagios(object): """ cmd = "DISABLE_SVC_NOTIFICATIONS" + + if services is None: + services = [] + for service in services: notif_str = self._fmt_notif_str(cmd, host, svc=service) self._write_command(notif_str) @@ -628,7 +636,7 @@ class Nagios(object): else: return "Fail: could not write to the command file" - def enable_svc_notifications(self, host, services=[]): + def enable_svc_notifications(self, host, services=None): """ Enables notifications for a particular service. 
@@ -638,6 +646,10 @@
         """
 
         cmd = "ENABLE_SVC_NOTIFICATIONS"
+
+        if services is None:
+            services = []
+
         nagios_return = True
         return_str_list = []
         for service in services:
diff --git a/monitoring/pingdom.py b/monitoring/pingdom.py
index 6f658cd9505..0ae1af357e0 100644
--- a/monitoring/pingdom.py
+++ b/monitoring/pingdom.py
@@ -111,7 +111,7 @@ def main():
     )
 
     if not HAS_PINGDOM:
-        module.fail_json(msg="Missing requried pingdom module (check docs)")
+        module.fail_json(msg="Missing required pingdom module (check docs)")
 
     checkid = module.params['checkid']
     state = module.params['state']
diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py
index a316405456b..489a8617f54 100644
--- a/monitoring/zabbix_group.py
+++ b/monitoring/zabbix_group.py
@@ -36,7 +36,7 @@ options:
        choices: [ 'present', 'absent' ]
    host_group:
        description:
-           - Name of the host groupto be added or removed.
+           - Name of the host group to be added or removed.
        required: true
        default: null
        aliases: [ ]
diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py
new file mode 100644
index 00000000000..c7b8e52b9e7
--- /dev/null
+++ b/monitoring/zabbix_host.py
@@ -0,0 +1,451 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013-2014, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: zabbix_host
+short_description: Zabbix host creates/updates/deletes
+description:
+   - This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
+version_added: "2.0"
+author: Tony Minfei Ding, Harrison Gu
+requirements:
+    - zabbix-api python module
+options:
+    server_url:
+        description:
+            - Url of Zabbix server, with protocol (http or https).
+        required: true
+        aliases: [ "url" ]
+    login_user:
+        description:
+            - Zabbix user name, used to authenticate against the server.
+        required: true
+    login_password:
+        description:
+            - Zabbix user password.
+        required: true
+    host_name:
+        description:
+            - Name of the host in Zabbix.
+            - host_name is the unique identifier used and cannot be updated using this module.
+        required: true
+    host_groups:
+        description:
+            - List of host groups the host is part of.
+        required: false
+    link_templates:
+        description:
+            - List of templates linked to the host.
+        required: false
+        default: None
+    status:
+        description:
+            - 'Monitoring status of the host. Possible values are: "enabled" and "disabled".'
+        required: false
+        default: "enabled"
+    state:
+        description:
+            - 'Possible values are: "present" and "absent". If the host already exists and the state is "present", it will just update the host if the associated data is different. "absent" will remove a host if it exists.'
+        required: false
+        default: "present"
+    timeout:
+        description:
+            - The timeout of API requests (in seconds).
+        default: 10
+    interfaces:
+        description:
+            - List of interfaces to be created for the host (see example below).
+ - 'Available values are: dns, ip, main, port, type and useip.' + - Please review the interface documentation for more information on the supported properties + - https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface + required: false + default: [] +''' + +EXAMPLES = ''' +- name: Create a new host or update an existing host's info + local_action: + module: zabbix_host + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + host_groups: + - Example group1 + - Example group2 + link_templates: + - Example template1 + - Example template2 + status: enabled + state: present + interfaces: + - type: 1 + main: 1 + useip: 1 + ip: 10.xx.xx.xx + dns: "" + port: 10050 + - type: 4 + main: 1 + useip: 1 + ip: 10.xx.xx.xx + dns: "" + port: 12345 +''' + +import logging +import copy +from ansible.module_utils.basic import * + +try: + from zabbix_api import ZabbixAPI, ZabbixAPISubClass + + HAS_ZABBIX_API = True +except ImportError: + HAS_ZABBIX_API = False + + +# Extend the ZabbixAPI +# Since the zabbix-api python module too old (version 1.0, no higher version so far), +# it does not support the 'hostinterface' api calls, +# so we have to inherit the ZabbixAPI class to add 'hostinterface' support. +class ZabbixAPIExtends(ZabbixAPI): + hostinterface = None + + def __init__(self, server, timeout, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout) + self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs)) + + +class Host(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # exist host + def is_host_exist(self, host_name): + result = self._zapi.host.exists({'host': host_name}) + return result + + # check if host group exists + def check_host_group_exist(self, group_names): + for group_name in group_names: + result = self._zapi.hostgroup.exists({'name': group_name}) + if not result: + self._module.fail_json(msg="Hostgroup not found: %s" % group_name) + return True + + def get_template_ids(self, template_list): + template_ids = [] + if template_list is None or len(template_list) == 0: + return template_ids + for template in template_list: + template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}}) + if len(template_list) < 1: + self._module.fail_json(msg="Template not found: %s" % template) + else: + template_id = template_list[0]['templateid'] + template_ids.append(template_id) + return template_ids + + def add_host(self, host_name, group_ids, status, interfaces): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + host_list = self._zapi.host.create({'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}) + if len(host_list) >= 1: + return host_list['hostids'][0] + except Exception, e: + self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e)) + + def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.update({'hostid': host_id, 'groups': group_ids, 'status': status}) + interface_list_copy = exist_interface_list + if interfaces: + for interface in interfaces: + flag = False + interface_str = interface + for exist_interface in exist_interface_list: + interface_type = interface['type'] + exist_interface_type = int(exist_interface['type']) + if interface_type == exist_interface_type: + # 
update + interface_str['interfaceid'] = exist_interface['interfaceid'] + self._zapi.hostinterface.update(interface_str) + flag = True + interface_list_copy.remove(exist_interface) + break + if not flag: + # add + interface_str['hostid'] = host_id + self._zapi.hostinterface.create(interface_str) + # remove + remove_interface_ids = [] + for remove_interface in interface_list_copy: + interface_id = remove_interface['interfaceid'] + remove_interface_ids.append(interface_id) + if len(remove_interface_ids) > 0: + self._zapi.hostinterface.delete(remove_interface_ids) + except Exception, e: + self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e)) + + def delete_host(self, host_id, host_name): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.delete({'hostid': host_id}) + except Exception, e: + self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e)) + + # get host by host name + def get_host_by_host_name(self, host_name): + host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}}) + if len(host_list) < 1: + self._module.fail_json(msg="Host not found: %s" % host_name) + else: + return host_list[0] + + # get group ids by group names + def get_group_ids_by_group_names(self, group_names): + group_ids = [] + if self.check_host_group_exist(group_names): + group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}}) + for group in group_list: + group_id = group['groupid'] + group_ids.append({'groupid': group_id}) + return group_ids + + # get host templates by host id + def get_host_templates_by_host_id(self, host_id): + template_ids = [] + template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id}) + for template in template_list: + template_ids.append(template['templateid']) + return template_ids + + # get host groups by host id + def get_host_groups_by_host_id(self, host_id): + exist_host_groups = [] + host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id}) + + if len(host_groups_list) >= 1: + for host_groups_name in host_groups_list: + exist_host_groups.append(host_groups_name['name']) + return exist_host_groups + + # check the exist_interfaces whether it equals the interfaces or not + def check_interface_properties(self, exist_interface_list, interfaces): + interfaces_port_list = [] + if len(interfaces) >= 1: + for interface in interfaces: + interfaces_port_list.append(int(interface['port'])) + + exist_interface_ports = [] + if len(exist_interface_list) >= 1: + for exist_interface in exist_interface_list: + exist_interface_ports.append(int(exist_interface['port'])) + + if set(interfaces_port_list) != set(exist_interface_ports): + return True + + for exist_interface in exist_interface_list: + exit_interface_port = int(exist_interface['port']) + for interface in interfaces: + interface_port = int(interface['port']) + if interface_port == exit_interface_port: + for key in interface.keys(): + if str(exist_interface[key]) != str(interface[key]): + return True + + return False + + # get the status of host by host + def get_host_status_by_host(self, host): + return host['status'] + + # check all the properties before link or clear template + def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids, + exist_interfaces, host): + # get the existing host's groups + exist_host_groups = self.get_host_groups_by_host_id(host_id) + if set(host_groups) != set(exist_host_groups): + return True + 
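+        # NOTE: the set() comparisons in this method ignore ordering and
+        # duplicates, so e.g. a reordered group list is not reported as a change.
+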
+        # get the existing status
+        exist_status = self.get_host_status_by_host(host)
+        if int(status) != int(exist_status):
+            return True
+
+        # check whether exist_interfaces equals the given interfaces or not
+        if self.check_interface_properties(exist_interfaces, interfaces):
+            return True
+
+        # get the existing templates
+        exist_template_ids = self.get_host_templates_by_host_id(host_id)
+        if set(list(template_ids)) != set(exist_template_ids):
+            return True
+
+        return False
+
+    # link or clear template of the host
+    def link_or_clear_template(self, host_id, template_id_list):
+        # get the host's existing template ids
+        exist_template_id_list = self.get_host_templates_by_host_id(host_id)
+
+        exist_template_ids = set(exist_template_id_list)
+        template_ids = set(template_id_list)
+        template_id_list = list(template_ids)
+
+        # get unlink and clear templates
+        templates_clear = exist_template_ids.difference(template_ids)
+        templates_clear_list = list(templates_clear)
+        request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list}
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            self._zapi.host.update(request_str)
+        except Exception, e:
+            self._module.fail_json(msg="Failed to link template to host: %s" % e)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            server_url=dict(required=True, aliases=['url']),
+            login_user=dict(required=True),
+            login_password=dict(required=True),
+            host_name=dict(required=True),
+            host_groups=dict(required=False),
+            link_templates=dict(required=False),
+            status=dict(default="enabled"),
+            state=dict(default="present"),
+            timeout=dict(default=10),
+            interfaces=dict(required=False)
+        ),
+        supports_check_mode=True
+    )
+
+    if not HAS_ZABBIX_API:
+        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
+
+    server_url = module.params['server_url']
+    login_user = module.params['login_user']
+    login_password = module.params['login_password']
+    host_name = module.params['host_name']
+    host_groups = module.params['host_groups']
+    link_templates = module.params['link_templates']
+    status = module.params['status']
+    state = module.params['state']
+    timeout = module.params['timeout']
+    interfaces = module.params['interfaces']
+
+    # convert enabled to 0; disabled to 1
+    status = 1 if status == "disabled" else 0
+
+    zbx = None
+    # login to zabbix
+    try:
+        zbx = ZabbixAPIExtends(server_url, timeout=timeout)
+        zbx.login(login_user, login_password)
+    except Exception, e:
+        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+    host = Host(module, zbx)
+
+    template_ids = []
+    if link_templates:
+        template_ids = host.get_template_ids(link_templates)
+
+    group_ids = []
+
+    if host_groups:
+        group_ids = host.get_group_ids_by_group_names(host_groups)
+
+    ip = ""
+    if interfaces:
+        for interface in interfaces:
+            if interface['type'] == 1:
+                ip = interface['ip']
+
+    # check if the host exists
+    is_host_exist = host.is_host_exist(host_name)
+
+    if is_host_exist:
+        # get host id by host name
+        zabbix_host_obj = host.get_host_by_host_name(host_name)
+        host_id = zabbix_host_obj['hostid']
+
+        if state == "absent":
+            # remove host
+            host.delete_host(host_id, host_name)
+            module.exit_json(changed=True, result="Successfully deleted host %s" % host_name)
+        else:
+            if not group_ids:
+                module.fail_json(msg="Specify at least one group for updating host '%s'." 
% host_name)
+
+        # get the existing host's interfaces
+        exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
+        exist_interfaces_copy = copy.deepcopy(exist_interfaces)
+
+        # update host
+        interfaces_len = len(interfaces) if interfaces else 0
+
+        if len(exist_interfaces) > interfaces_len:
+            if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
+                                         exist_interfaces, zabbix_host_obj):
+                host.link_or_clear_template(host_id, template_ids)
+                host.update_host(host_name, group_ids, status, host_id,
+                                 interfaces, exist_interfaces)
+                module.exit_json(changed=True,
+                                 result="Successfully updated host %s (%s) and linked with template '%s'"
+                                        % (host_name, ip, link_templates))
+            else:
+                module.exit_json(changed=False)
+        else:
+            if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
+                                         exist_interfaces_copy, zabbix_host_obj):
+                host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces)
+                host.link_or_clear_template(host_id, template_ids)
+                module.exit_json(changed=True,
+                                 result="Successfully updated host %s (%s) and linked with template '%s'"
+                                        % (host_name, ip, link_templates))
+            else:
+                module.exit_json(changed=False)
+    else:
+        if not group_ids:
+            module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
+
+        if not interfaces or (interfaces and len(interfaces) == 0):
+            module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
+
+        # create host
+        host_id = host.add_host(host_name, group_ids, status, interfaces)
+        host.link_or_clear_template(host_id, template_ids)
+        module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
+            host_name, ip, link_templates))
+
+from ansible.module_utils.basic import *
+main()
+
diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py
new file mode 100644
index 00000000000..b41e114d760
--- /dev/null
+++ b/monitoring/zabbix_hostmacro.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013-2014, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: zabbix_hostmacro
+short_description: Zabbix host macro creates/updates/deletes
+description:
+   - Manages Zabbix host macros; it can create, update or delete them.
+version_added: "2.0"
+author: Dean Hailin Song
+requirements:
+    - zabbix-api python module
+options:
+    server_url:
+        description:
+            - Url of Zabbix server, with protocol (http or https).
+        required: true
+        aliases: [ "url" ]
+    login_user:
+        description:
+            - Zabbix user name.
+        required: true
+    login_password:
+        description:
+            - Zabbix user password.
+        required: true
+    host_name:
+        description:
+            - Name of the host.
+        required: true
+    macro_name:
+        description:
+            - Name of the host macro.
+        required: true
+    macro_value:
+        description:
+            - Value of the host macro.
+        required: true
+    state:
+        description:
+            - 'Possible values are: "present" and "absent". If the macro already exists and the state is "present", it will just update the macro if needed.'
+        required: false
+        default: "present"
+    timeout:
+        description:
+            - The timeout of API requests (in seconds).
+        default: 10
+'''
+
+EXAMPLES = '''
+- name: Create a new host macro or update an existing macro's value
+  local_action:
+    module: zabbix_hostmacro
+    server_url: http://monitor.example.com
+    login_user: username
+    login_password: password
+    host_name: ExampleHost
+    macro_name: Example macro
+    macro_value: Example value
+    state: present
+'''
+
+import logging
+import copy
+from ansible.module_utils.basic import *
+
+try:
+    from zabbix_api import ZabbixAPI, ZabbixAPISubClass
+
+    HAS_ZABBIX_API = True
+except ImportError:
+    HAS_ZABBIX_API = False
+
+
+# Extend the ZabbixAPI
+# Since the zabbix-api python module is too old (version 1.0, no higher version so far).
+class ZabbixAPIExtends(ZabbixAPI):
+    def __init__(self, server, timeout, **kwargs):
+        ZabbixAPI.__init__(self, server, timeout=timeout)
+
+
+class HostMacro(object):
+    def __init__(self, module, zbx):
+        self._module = module
+        self._zapi = zbx
+
+    # check if host exists
+    def is_host_exist(self, host_name):
+        result = self._zapi.host.exists({'host': host_name})
+        return result
+
+    # get host id by host name
+    def get_host_id(self, host_name):
+        try:
+            host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': host_name}})
+            if len(host_list) < 1:
+                self._module.fail_json(msg="Host not found: %s" % host_name)
+            else:
+                host_id = host_list[0]['hostid']
+                return host_id
+        except Exception, e:
+            self._module.fail_json(msg="Failed to get the host %s id: %s." % (host_name, e))
+
+    # get host macro
+    def get_host_macro(self, macro_name, host_id):
+        try:
+            host_macro_list = self._zapi.usermacro.get(
+                {"output": "extend", "selectSteps": "extend", 'hostids': [host_id], 'filter': {'macro': '{$' + macro_name + '}'}})
+            if len(host_macro_list) > 0:
+                return host_macro_list[0]
+            return None
+        except Exception, e:
+            self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e))
+
+    # create host macro
+    def create_host_macro(self, macro_name, macro_value, host_id):
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            self._zapi.usermacro.create({'hostid': host_id, 'macro': '{$' + macro_name + '}', 'value': macro_value})
+            self._module.exit_json(changed=True, result="Successfully added host macro %s " % macro_name)
+        except Exception, e:
+            self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e))
+
+    # update host macro
+    def update_host_macro(self, host_macro_obj, macro_name, macro_value):
+        host_macro_id = host_macro_obj['hostmacroid']
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value})
+            self._module.exit_json(changed=True, result="Successfully updated host macro %s " % macro_name)
+        except Exception, e:
+            self._module.fail_json(msg="Failed to update host macro %s: %s" % (macro_name, e))
+
+    # delete host macro
+    def delete_host_macro(self, host_macro_obj, macro_name):
+        host_macro_id = host_macro_obj['hostmacroid']
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            self._zapi.usermacro.delete([host_macro_id])
+            self._module.exit_json(changed=True, result="Successfully deleted host macro %s " % macro_name)
+        except Exception, e:
+            
self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e)) + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(required=True, aliases=['url']), + login_user=dict(required=True), + login_password=dict(required=True), + host_name=dict(required=True), + macro_name=dict(required=True), + macro_value=dict(required=True), + state=dict(default="present"), + timeout=dict(default=10) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + host_name = module.params['host_name'] + macro_name = (module.params['macro_name']).upper() + macro_value = module.params['macro_value'] + state = module.params['state'] + timeout = module.params['timeout'] + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx.login(login_user, login_password) + except Exception, e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + host_macro_class_obj = HostMacro(module, zbx) + + changed = False + + if host_name: + host_id = host_macro_class_obj.get_host_id(host_name) + host_macro_obj = host_macro_class_obj.get_host_macro(macro_name, host_id) + + if state == 'absent': + if not host_macro_obj: + module.exit_json(changed=False, msg="Host Macro %s does not exist" % macro_name) + else: + # delete a macro + host_macro_class_obj.delete_host_macro(host_macro_obj, macro_name) + else: + if not host_macro_obj: + # create host macro + host_macro_class_obj.create_host_macro(macro_name, macro_value, host_id) + else: + # update host macro + host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value) + +from ansible.module_utils.basic import * +main() + diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py new file mode 100644 index 00000000000..ada2b1c6ab0 --- /dev/null +++ b/monitoring/zabbix_screen.py @@ -0,0 +1,415 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013-2014, Epic Games, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +DOCUMENTATION = ''' +--- +module: zabbix_screen +short_description: Zabbix screen creates/updates/deletes +description: + - This module allows you to create, modify and delete Zabbix screens and associated graph data. +version_added: "2.0" +author: Tony Minfei Ding, Harrison Gu +requirements: + - zabbix-api python module +options: + server_url: + description: + - Url of Zabbix server, with protocol (http or https). + required: true + aliases: [ "url" ] + login_user: + description: + - Zabbix user name. + required: true + login_password: + description: + - Zabbix user password. + required: true + timeout: + description: + - The timeout of API request(seconds). 
+        default: 10
+    screens:
+        description:
+            - List of screens to be created/updated/deleted (see example).
+            - If a screen has already been added, its name won't be updated.
+            - When creating or updating a screen, the screen_name and host_group are required.
+            - When deleting a screen, the screen_name is required.
+            - 'The available states are: present (default) and absent. If the screen already exists, and the state is not "absent", the screen will just be updated as needed.'
+        required: true
+notes:
+    - Too many concurrent updates to the same screen may cause Zabbix to return errors; see examples for a workaround if needed.
+'''

+EXAMPLES = '''
+# Create/update a screen.
+- name: Create a new screen or update an existing screen's items
+  local_action:
+    module: zabbix_screen
+    server_url: http://monitor.example.com
+    login_user: username
+    login_password: password
+    screens:
+      - screen_name: ExampleScreen1
+        host_group: Example group1
+        state: present
+        graph_names:
+          - Example graph1
+          - Example graph2
+        graph_width: 200
+        graph_height: 100
+
+# Create/update multiple screens
+- name: Create two new screens or update the existing screens' items
+  local_action:
+    module: zabbix_screen
+    server_url: http://monitor.example.com
+    login_user: username
+    login_password: password
+    screens:
+      - screen_name: ExampleScreen1
+        host_group: Example group1
+        state: present
+        graph_names:
+          - Example graph1
+          - Example graph2
+        graph_width: 200
+        graph_height: 100
+      - screen_name: ExampleScreen2
+        host_group: Example group2
+        state: present
+        graph_names:
+          - Example graph1
+          - Example graph2
+        graph_width: 200
+        graph_height: 100
+
+# Limit the Zabbix screen creations to one host since Zabbix can return an error when doing concurrent updates
+- name: Create a new screen or update an existing screen's items
+  local_action:
+    module: zabbix_screen
+    server_url: http://monitor.example.com
+    login_user: username
+    login_password: password
+    state: present
+    screens:
+      - screen_name: ExampleScreen
+        host_group: Example group
+        state: present
+        graph_names:
+          - Example graph1
+          - Example graph2
+        graph_width: 200
+        graph_height: 100
+  when: inventory_hostname==groups['group_name'][0]
+'''
+
+from ansible.module_utils.basic import *
+
+try:
+    from zabbix_api import ZabbixAPI, ZabbixAPISubClass
+    from zabbix_api import ZabbixAPIException
+    from zabbix_api import Already_Exists
+    HAS_ZABBIX_API = True
+except ImportError:
+    HAS_ZABBIX_API = False
+
+
+# Extend the ZabbixAPI
+# Since the zabbix-api python module is too old (version 1.0, and there's no higher version so far), it doesn't support the 'screenitem' api call,
+# so we have to inherit the ZabbixAPI class to add 'screenitem' support.
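+# (ZabbixAPISubClass proxies attribute access into "<prefix>.<method>"
+# JSON-RPC requests, so e.g. zapi.screenitem.get(...) is sent as a
+# "screenitem.get" API call.)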
+class ZabbixAPIExtends(ZabbixAPI): + screenitem = None + + def __init__(self, server, timeout, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout) + self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs)) + + +class Screen(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # get group id by group name + def get_host_group_id(self, group_name): + if group_name == "": + self._module.fail_json(msg="group_name is required") + hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}}) + if len(hostGroup_list) < 1: + self._module.fail_json(msg="Host group not found: %s" % group_name) + else: + hostGroup_id = hostGroup_list[0]['groupid'] + return hostGroup_id + + # get monitored host_id by host_group_id + def get_host_ids_by_group_id(self, group_id): + host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1}) + if len(host_list) < 1: + self._module.fail_json(msg="No host in the group.") + else: + host_ids = [] + for i in host_list: + host_id = i['hostid'] + host_ids.append(host_id) + return host_ids + + # get screen + def get_screen_id(self, screen_name): + if screen_name == "": + self._module.fail_json(msg="screen_name is required") + try: + screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}}) + if len(screen_id_list) >= 1: + screen_id = screen_id_list[0]['screenid'] + return screen_id + return None + except Exception as e: + self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e)) + + # create screen + def create_screen(self, screen_name, h_size, v_size): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size}) + return screen['screenids'][0] + except Exception as e: + self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e)) + + # update screen + def update_screen(self, screen_id, screen_name, h_size, v_size): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size}) + except Exception as e: + self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e)) + + # delete screen + def delete_screen(self, screen_id, screen_name): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.screen.delete([screen_id]) + except Exception as e: + self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e)) + + # get graph ids + def get_graph_ids(self, hosts, graph_name_list): + graph_id_lists = [] + vsize = 1 + for host in hosts: + graph_id_list = self.get_graphs_by_host_id(graph_name_list, host) + size = len(graph_id_list) + if size > 0: + graph_id_lists.extend(graph_id_list) + if vsize < size: + vsize = size + return graph_id_lists, vsize + + # getGraphs + def get_graphs_by_host_id(self, graph_name_list, host_id): + graph_ids = [] + for graph_name in graph_name_list: + graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id}) + graph_id_list = [] + if len(graphs_list) > 0: + for graph in graphs_list: + graph_id = graph['graphid'] + graph_id_list.append(graph_id) + if len(graph_id_list) > 0: + graph_ids.extend(graph_id_list) + return graph_ids + + # get screen items + def get_screen_items(self, screen_id): + screen_item_list = 
self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id})
+        return screen_item_list
+
+    # delete screen items
+    def delete_screen_items(self, screen_id, screen_item_id_list):
+        try:
+            if len(screen_item_id_list) == 0:
+                return True
+            screen_item_list = self.get_screen_items(screen_id)
+            if len(screen_item_list) > 0:
+                if self._module.check_mode:
+                    self._module.exit_json(changed=True)
+                self._zapi.screenitem.delete(screen_item_id_list)
+                return True
+            return False
+        except ZabbixAPIException:
+            pass
+
+    # get screen's hsize and vsize
+    def get_hsize_vsize(self, hosts, v_size):
+        h_size = len(hosts)
+        if h_size == 1:
+            if v_size == 1:
+                h_size = 1
+            elif v_size in range(2, 9):
+                h_size = 2
+            else:
+                h_size = 3
+            v_size = (v_size - 1) / h_size + 1
+        return h_size, v_size
+
+    # create screen_items
+    def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size):
+        if len(hosts) < 4:
+            if width is None or width < 0:
+                width = 500
+        else:
+            if width is None or width < 0:
+                width = 200
+        if height is None or height < 0:
+            height = 100
+
+        try:
+            # when there is only one host, a single row of graphs is not ideal.
+            if len(hosts) == 1:
+                graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0])
+                for i, graph_id in enumerate(graph_id_list):
+                    if graph_id is not None:
+                        self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
+                                                      'width': width, 'height': height,
+                                                      'x': i % h_size, 'y': i / h_size, 'colspan': 1, 'rowspan': 1,
+                                                      'elements': 0, 'valign': 0, 'halign': 0,
+                                                      'style': 0, 'dynamic': 0, 'sort_triggers': 0})
+            else:
+                for i, host in enumerate(hosts):
+                    graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
+                    for j, graph_id in enumerate(graph_id_list):
+                        if graph_id is not None:
+                            self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
+                                                          'width': width, 'height': height,
+                                                          'x': i, 'y': j, 'colspan': 1, 'rowspan': 1,
+                                                          'elements': 0, 'valign': 0, 'halign': 0,
+                                                          'style': 0, 'dynamic': 0, 'sort_triggers': 0})
+        except Already_Exists:
+            pass
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            server_url=dict(required=True, aliases=['url']),
+            login_user=dict(required=True),
+            login_password=dict(required=True),
+            timeout=dict(default=10),
+            screens=dict(required=True)
+        ),
+        supports_check_mode=True
+    )
+
+    if not HAS_ZABBIX_API:
+        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
+
+    server_url = module.params['server_url']
+    login_user = module.params['login_user']
+    login_password = module.params['login_password']
+    timeout = module.params['timeout']
+    screens = module.params['screens']
+
+    zbx = None
+    # login to zabbix
+    try:
+        zbx = ZabbixAPIExtends(server_url, timeout=timeout)
+        zbx.login(login_user, login_password)
+    except Exception, e:
+        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+    screen = Screen(module, zbx)
+    created_screens = []
+    changed_screens = []
+    deleted_screens = []
+
+    for zabbix_screen in screens:
+        screen_name = zabbix_screen['screen_name']
+        screen_id = screen.get_screen_id(screen_name)
+        state = "absent" if "state" in zabbix_screen and zabbix_screen['state'] == "absent" else "present"
+
+        if state == "absent":
+            if screen_id:
+                screen_item_list = screen.get_screen_items(screen_id)
+                screen_item_id_list = []
+                for screen_item in screen_item_list:
+                    screen_item_id = screen_item['screenitemid']
+                    screen_item_id_list.append(screen_item_id)
+                
screen.delete_screen_items(screen_id, screen_item_id_list) + screen.delete_screen(screen_id, screen_name) + + deleted_screens.append(screen_name) + else: + host_group = zabbix_screen['host_group'] + graph_names = zabbix_screen['graph_names'] + graph_width = None + if 'graph_width' in zabbix_screen: + graph_width = zabbix_screen['graph_width'] + graph_height = None + if 'graph_height' in zabbix_screen: + graph_height = zabbix_screen['graph_height'] + host_group_id = screen.get_host_group_id(host_group) + hosts = screen.get_host_ids_by_group_id(host_group_id) + + screen_item_id_list = [] + resource_id_list = [] + + graph_ids, v_size = screen.get_graph_ids(hosts, graph_names) + h_size, v_size = screen.get_hsize_vsize(hosts, v_size) + + if not screen_id: + # create screen + screen_id = screen.create_screen(screen_name, h_size, v_size) + screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size) + created_screens.append(screen_name) + else: + screen_item_list = screen.get_screen_items(screen_id) + + for screen_item in screen_item_list: + screen_item_id = screen_item['screenitemid'] + resource_id = screen_item['resourceid'] + screen_item_id_list.append(screen_item_id) + resource_id_list.append(resource_id) + + # when the screen items changed, then update + if graph_ids != resource_id_list: + deleted = screen.delete_screen_items(screen_id, screen_item_id_list) + if deleted: + screen.update_screen(screen_id, screen_name, h_size, v_size) + screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size) + changed_screens.append(screen_name) + + if created_screens and changed_screens: + module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens), ",".join(changed_screens))) + elif created_screens: + module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens)) + elif changed_screens: + module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens)) + elif deleted_screens: + module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens)) + else: + module.exit_json(changed=False) + +# <> +main() diff --git a/network/citrix/netscaler.py b/network/citrix/netscaler.py index de3c8fc2421..b2f87aa0d08 100644 --- a/network/citrix/netscaler.py +++ b/network/citrix/netscaler.py @@ -97,7 +97,6 @@ ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=api ''' -import json import base64 import socket diff --git a/network/dnsimple.py b/network/dnsimple.py index 363a2ca24c1..9aa52172f19 100755 --- a/network/dnsimple.py +++ b/network/dnsimple.py @@ -32,7 +32,7 @@ options: description: - Account API token. See I(account_email) for info. 
required: false - default: null + default: null domain: description: @@ -67,7 +67,7 @@ options: default: 3600 (one hour) value: - description: + description: - Record value - "Must be specified when trying to ensure a record exists" required: false @@ -133,9 +133,9 @@ import os try: from dnsimple import DNSimple from dnsimple.dnsimple import DNSimpleException + HAS_DNSIMPLE = True except ImportError: - print "failed=True msg='dnsimple required for this module'" - sys.exit(1) + HAS_DNSIMPLE = False def main(): module = AnsibleModule( @@ -148,7 +148,7 @@ def main(): type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']), ttl = dict(required=False, default=3600, type='int'), value = dict(required=False), - priority = dict(required=False, type='int'), + priority = dict(required=False, type='int'), state = dict(required=False, choices=['present', 'absent']), solo = dict(required=False, type='bool'), ), @@ -158,6 +158,9 @@ def main(): supports_check_mode = True, ) + if not HAS_DNSIMPLE: + module.fail_json("dnsimple required for this module") + account_email = module.params.get('account_email') account_api_token = module.params.get('account_api_token') domain = module.params.get('domain') diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 99a1e31de68..f74c66b6036 100755 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -56,6 +56,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 2.0 session: description: - BIG-IP session support; may be useful to avoid concurrency @@ -70,8 +78,8 @@ options: required: true default: null choices: ['address_class', 'certificate', 'client_ssl_profile', - 'device_group', 'interface', 'key', 'node', 'pool', 'rule', - 'self_ip', 'software', 'system_info', 'traffic_group', + 'device', 'device_group', 'interface', 'key', 'node', 'pool', + 'rule', 'self_ip', 'software', 'system_info', 'traffic_group', 'trunk', 'virtual_address', 'virtual_server', 'vlan'] aliases: [] filter: @@ -105,7 +113,7 @@ EXAMPLES = ''' try: import bigsuds - from suds import MethodNotFound + from suds import MethodNotFound, WebFault except ImportError: bigsuds_found = False else: @@ -1364,7 +1372,7 @@ def generate_dict(api_obj, fields): for field in fields: try: api_response = getattr(api_obj, "get_" + field)() - except MethodNotFound: + except (MethodNotFound, WebFault): pass else: lists.append(api_response) @@ -1380,7 +1388,7 @@ def generate_simple_dict(api_obj, fields): for field in fields: try: api_response = getattr(api_obj, "get_" + field)() - except MethodNotFound: + except (MethodNotFound, WebFault): pass else: result_dict[field] = api_response @@ -1566,6 +1574,12 @@ def generate_software_list(f5): software_list = software.get_all_software_status() return software_list +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. 
+ # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def main(): module = AnsibleModule( @@ -1573,6 +1587,7 @@ def main(): server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), + validate_certs = dict(default='yes', type='bool'), session = dict(type='bool', default=False), include = dict(type='list', required=True), filter = dict(type='str', required=False), @@ -1585,6 +1600,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] session = module.params['session'] fact_filter = module.params['filter'] if fact_filter: @@ -1593,14 +1609,17 @@ def main(): regex = None include = map(lambda x: x.lower(), module.params['include']) valid_includes = ('address_class', 'certificate', 'client_ssl_profile', - 'device_group', 'interface', 'key', 'node', 'pool', - 'rule', 'self_ip', 'software', 'system_info', + 'device', 'device_group', 'interface', 'key', 'node', + 'pool', 'rule', 'self_ip', 'software', 'system_info', 'traffic_group', 'trunk', 'virtual_address', 'virtual_server', 'vlan') include_test = map(lambda x: x in valid_includes, include) if not all(include_test): module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include))) + if not validate_certs: + disable_ssl_cert_validation() + try: facts = {} diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py index 62823f86579..d131eb71eee 100644 --- a/network/f5/bigip_monitor_http.py +++ b/network/f5/bigip_monitor_http.py @@ -51,6 +51,14 @@ options: - BIG-IP password required: true default: null + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 2.0 state: description: - Monitor state @@ -177,6 +185,14 @@ def bigip_api(bigip, user, password): return api +def disable_ssl_cert_validation(): + + # You probably only want to do this for testing and never in production. 
+ # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + + def check_monitor_exists(module, api, monitor, parent): # hack to determine if monitor exists @@ -311,6 +327,7 @@ def main(): server = dict(required=True), user = dict(required=True), password = dict(required=True), + validate_certs = dict(default='yes', type='bool'), partition = dict(default='Common'), state = dict(default='present', choices=['present', 'absent']), name = dict(required=True), @@ -331,6 +348,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] partition = module.params['partition'] parent_partition = module.params['parent_partition'] state = module.params['state'] @@ -348,6 +366,9 @@ def main(): # end monitor specific stuff + if not validate_certs: + disable_ssl_cert_validation() + if not bigsuds_found: module.fail_json(msg="the python bigsuds module is required") api = bigip_api(server, user, password) diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py index 8b89a0c6113..5cc00fe6b68 100644 --- a/network/f5/bigip_monitor_tcp.py +++ b/network/f5/bigip_monitor_tcp.py @@ -49,6 +49,14 @@ options: - BIG-IP password required: true default: null + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 2.0 state: description: - Monitor state @@ -196,6 +204,14 @@ def bigip_api(bigip, user, password): return api +def disable_ssl_cert_validation(): + + # You probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + + def check_monitor_exists(module, api, monitor, parent): # hack to determine if monitor exists @@ -331,6 +347,7 @@ def main(): server = dict(required=True), user = dict(required=True), password = dict(required=True), + validate_certs = dict(default='yes', type='bool'), partition = dict(default='Common'), state = dict(default='present', choices=['present', 'absent']), name = dict(required=True), @@ -351,6 +368,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] partition = module.params['partition'] parent_partition = module.params['parent_partition'] state = module.params['state'] @@ -372,6 +390,9 @@ def main(): # end monitor specific stuff + if not validate_certs: + disable_ssl_cert_validation() + if not bigsuds_found: module.fail_json(msg="the python bigsuds module is required") api = bigip_api(server, user, password) diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index 68b6a2b52f1..ca212763881 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -54,12 +54,20 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
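The validate_certs handling added across these F5 modules all funnels into the same PEP 476 opt-out. As a standalone sketch (same body as the hunks above; only the surrounding comments are mine):

```python
# Process-wide opt-out from HTTPS certificate verification (PEP 476,
# "Opting out"), as used by the bigip_* modules when validate_certs=no.
# This affects every HTTPS client in the process, so reserve it for
# appliances with self-signed certificates that you control.
import ssl

def disable_ssl_cert_validation():
    # Swap the default HTTPS context factory for one that skips
    # certificate and hostname checks.
    ssl._create_default_https_context = ssl._create_unverified_context
```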
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 2.0 state: description: - Pool member state required: true default: present - choices: ['present', 'absent'] + choices: ['present', 'absent', 'enabled', 'disabled'] aliases: [] partition: description: @@ -70,7 +78,7 @@ options: aliases: [] name: description: - - "Node name" + - "Node name. Required when state=enabled/disabled" required: false default: null choices: [] @@ -137,6 +145,11 @@ EXAMPLES = ''' partition=matthite name="{{ ansible_default_ipv4["address"] }}" + - name: Disable node + bigip_node: server=lb.mydomain.com user=admin password=mysecret + state=disabled name=mynodename + delegate_to: localhost + ''' try: @@ -150,10 +163,23 @@ else: # bigip_node module specific # +# map of state values +STATES={'enabled': 'STATE_ENABLED', + 'disabled': 'STATE_DISABLED'} +STATUSES={'enabled': 'SESSION_STATUS_ENABLED', + 'disabled': 'SESSION_STATUS_DISABLED', + 'offline': 'SESSION_STATUS_FORCED_DISABLED'} + def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) return api +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def node_exists(api, address): # hack to determine if node exists result = False @@ -206,13 +232,34 @@ def set_node_description(api, name, description): def get_node_description(api, name): return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0] +def set_node_disabled(api, name): + set_node_session_enabled_state(api, name, STATES['disabled']) + result = True + desc = "" + return (result, desc) + +def set_node_enabled(api, name): + set_node_session_enabled_state(api, name, STATES['enabled']) + result = True + desc = "" + return (result, desc) + +def set_node_session_enabled_state(api, name, state): + api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name], + states=[state]) + +def get_node_session_status(api, name): + return api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0] + def main(): module = AnsibleModule( argument_spec = dict( server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), - state = dict(type='str', default='present', choices=['present', 'absent']), + validate_certs = dict(default='yes', type='bool'), + state = dict(type='str', default='present', + choices=['present', 'absent', 'disabled', 'enabled']), partition = dict(type='str', default='Common'), name = dict(type='str', required=True), host = dict(type='str', aliases=['address', 'ip']), @@ -227,6 +274,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] state = module.params['state'] partition = module.params['partition'] host = module.params['host'] @@ -234,6 +282,9 @@ def main(): address = "/%s/%s" % (partition, name) description = module.params['description'] + if not validate_certs: + disable_ssl_cert_validation() + if state == 'absent' and host is not None: module.fail_json(msg="host parameter invalid when state=absent") @@ -283,6 +334,32 @@ def main(): set_node_description(api, address, description) result = {'changed': True} + elif state in ('disabled', 'enabled'): + if name is None: + module.fail_json(msg="name parameter required when " \ + "state=enabled/disabled") 
+ if not module.check_mode: + if not node_exists(api, name): + module.fail_json(msg="node does not exist") + status = get_node_session_status(api, name) + if state == 'disabled': + if status not in (STATUSES['disabled'], STATUSES['offline']): + disabled, desc = set_node_disabled(api, name) + if not disabled: + module.fail_json(msg="unable to disable: %s" % desc) + else: + result = {'changed': True} + else: + if status != STATUSES['enabled']: + enabled, desc = set_node_enabled(api, name) + if not enabled: + module.fail_json(msg="unable to enable: %s" % desc) + else: + result = {'changed': True} + else: + # check-mode return value + result = {'changed': True} + except Exception, e: module.fail_json(msg="received exception: %s" % e) diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py index 48d03b9f1cb..425c1e97149 100644 --- a/network/f5/bigip_pool.py +++ b/network/f5/bigip_pool.py @@ -54,6 +54,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 2.0 state: description: - Pool/pool member state @@ -235,6 +243,12 @@ def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) return api +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def pool_exists(api, pool): # hack to determine if pool exists result = False @@ -359,6 +373,7 @@ def main(): server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), + validate_certs = dict(default='yes', type='bool'), state = dict(type='str', default='present', choices=['present', 'absent']), name = dict(type='str', required=True, aliases=['pool']), partition = dict(type='str', default='Common'), @@ -380,6 +395,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] state = module.params['state'] name = module.params['name'] partition = module.params['partition'] @@ -407,6 +423,9 @@ def main(): address = "/%s/%s" % (partition, host) port = module.params['port'] + if not validate_certs: + disable_ssl_cert_validation() + # sanity check user supplied values if (host and not port) or (port and not host): diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index 5aef9f0ae98..1304dfe33e5 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -56,6 +56,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 2.0 state: description: - Pool member state @@ -189,6 +197,12 @@ def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) return api +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. 
+ # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def pool_exists(api, pool): # hack to determine if pool exists result = False @@ -282,6 +296,7 @@ def main(): server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), + validate_certs = dict(default='yes', type='bool'), state = dict(type='str', default='present', choices=['present', 'absent']), pool = dict(type='str', required=True), partition = dict(type='str', default='Common'), @@ -301,6 +316,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] state = module.params['state'] partition = module.params['partition'] pool = "/%s/%s" % (partition, module.params['pool']) @@ -312,6 +328,9 @@ def main(): address = "/%s/%s" % (partition, host) port = module.params['port'] + if not validate_certs: + disable_ssl_cert_validation() + # sanity check user supplied values if (host and not port) or (port and not host): diff --git a/network/haproxy.py b/network/haproxy.py index 51b28d623a0..38757599df5 100644 --- a/network/haproxy.py +++ b/network/haproxy.py @@ -243,7 +243,7 @@ def main(): if not socket: module.fail_json(msg="unable to locate haproxy socket") - ansible_haproxy = HAProxy(module, **module.params) + ansible_haproxy = HAProxy(module) ansible_haproxy.act() # import module snippets diff --git a/network/lldp.py b/network/lldp.py index 6b8836852f6..ea6dc78d7bc 100755 --- a/network/lldp.py +++ b/network/lldp.py @@ -54,10 +54,12 @@ def gather_lldp(): lldp_entries = output.split("\n") for entry in lldp_entries: - if entry: + if entry.startswith('lldp'): path, value = entry.strip().split("=", 1) path = path.split(".") path_components, final = path[:-1], path[-1] + else: + value = current_dict[final] + '\n' + entry current_dict = output_dict for path_component in path_components: diff --git a/notification/hipchat.py b/notification/hipchat.py index 4ff95b32bf6..24fde9ecb35 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -137,7 +137,7 @@ def main(): try: send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception, e: - module.fail_json(msg="unable to sent msg: %s" % e) + module.fail_json(msg="unable to send msg: %s" % e) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) diff --git a/notification/mail.py b/notification/mail.py index 34cd3a09bf3..ae33c5ca4ca 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -62,13 +62,24 @@ options: subject: description: - The subject of the email being sent. - aliases: [ msg ] required: true body: description: - The body of the email being sent. default: $subject required: false + username: + description: + - If SMTP requires username + default: null + required: false + version_added: "1.9" + password: + description: + - If SMTP requires password + default: null + required: false + version_added: "1.9" host: description: - The mail server @@ -103,7 +114,17 @@ options: EXAMPLES = ''' # Example playbook sending mail to root -- local_action: mail msg='System {{ ansible_hostname }} has been successfully provisioned.' +- local_action: mail subject='System {{ ansible_hostname }} has been successfully provisioned.' 
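The mail.py hunk just below reworks connection setup. Condensed, the strategy it introduces looks like this (a sketch; the extra ehlo() after starttls() is my addition, which strict servers expect):

```python
# Connection strategy from the mail.py hunk below: try implicit TLS
# (SMTPS) first, fall back to plain SMTP, then upgrade via STARTTLS
# before authenticating when credentials are supplied.
import smtplib
import ssl

def smtp_connect(host, port, username=None, password=None):
    try:
        smtp = smtplib.SMTP_SSL(host, port=int(port))
    except (smtplib.SMTPException, ssl.SSLError):
        smtp = smtplib.SMTP(host, port=int(port))
    smtp.ehlo()
    if username and password:
        if smtp.has_extn('STARTTLS'):
            smtp.starttls()
            smtp.ehlo()  # re-identify on the now-encrypted channel (not in the hunk)
        smtp.login(username, password)
    return smtp
```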
+ +# Sending an e-mail using Gmail SMTP servers +- local_action: mail + host='smtp.gmail.com' + port=587 + username=username@gmail.com + password='mysecret' + to="John Smith " + subject='Ansible-report' + body='System {{ ansible_hostname }} has been successfully provisioned.' # Send e-mail to a bunch of users, attaching files - local_action: mail @@ -122,6 +143,7 @@ EXAMPLES = ''' import os import sys import smtplib +import ssl try: from email import encoders @@ -142,6 +164,8 @@ def main(): module = AnsibleModule( argument_spec = dict( + username = dict(default=None), + password = dict(default=None), host = dict(default='localhost'), port = dict(default='25'), sender = dict(default='root', aliases=['from']), @@ -156,6 +180,8 @@ def main(): ) ) + username = module.params.get('username') + password = module.params.get('password') host = module.params.get('host') port = module.params.get('port') sender = module.params.get('sender') @@ -167,17 +193,27 @@ def main(): attach_files = module.params.get('attach') headers = module.params.get('headers') charset = module.params.get('charset') - sender_phrase, sender_addr = parseaddr(sender) if not body: body = subject try: - smtp = smtplib.SMTP(host, port=int(port)) + try: + smtp = smtplib.SMTP_SSL(host, port=int(port)) + except (smtplib.SMTPException, ssl.SSLError): + smtp = smtplib.SMTP(host, port=int(port)) except Exception, e: module.fail_json(rc=1, msg='Failed to send mail to server %s on port %s: %s' % (host, port, e)) + smtp.ehlo() + if username and password: + if smtp.has_extn('STARTTLS'): + smtp.starttls() + try: + smtp.login(username, password) + except smtplib.SMTPAuthenticationError: + module.fail_json(msg="Authentication to %s:%s failed, please check your username and/or password" % (host, port)) msg = MIMEMultipart() msg['Subject'] = subject diff --git a/notification/pushover b/notification/pushover new file mode 100644 index 00000000000..3e710ca02dd --- /dev/null +++ b/notification/pushover @@ -0,0 +1,106 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2012, Jim Richardson +# All rights reserved. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +### + +DOCUMENTATION = ''' +--- +module: pushover +version_added: "2.0" +short_description: Send notifications via u(https://pushover.net) +description: + - Send notifications via pushover, to subscriber list of devices, and email + addresses. Requires pushover app on devices. +notes: + - You will require a pushover.net account to use this module. But no account + is required to receive messages. +options: + msg: + description: + What message you wish to send. + required: true + app_token: + description: + Pushover issued token identifying your pushover app. + required: true + user_key: + description: + Pushover issued authentication key for your user. + required: true + pri: + description: Message priority (see u(https://pushover.net) for details.) 
+    required: false
+
+author: Jim Richardson
+'''
+
+EXAMPLES = '''
+- local_action: pushover msg="{{inventory_hostname}} has exploded in flames,
+  It is now time to panic" app_token=wxfdksl user_key=baa5fe97f2c5ab3ca8f0bb59
+'''
+
+import urllib
+import httplib
+
+
+class pushover(object):
+    ''' Instantiates a pushover object, use it to send notifications '''
+
+    def __init__(self):
+        self.host, self.port = 'api.pushover.net', 443
+
+    def run(self):
+        ''' POST the configured message to the Pushover API. '''
+        conn = httplib.HTTPSConnection(self.host, self.port)
+        conn.request("POST", "/1/messages.json",
+                     urllib.urlencode(self.options),
+                     {"Content-type": "application/x-www-form-urlencoded"})
+        # read the response to complete the request; the body is not checked
+        conn.getresponse()
+        return
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            msg=dict(required=True),
+            app_token=dict(required=True),
+            user_key=dict(required=True),
+            pri=dict(required=False, default=0),
+        ),
+    )
+
+    msg_object = pushover()
+    msg_object.options = {}
+    msg_object.options['user'] = module.params['user_key']
+    msg_object.options['token'] = module.params['app_token']
+    msg_object.options['priority'] = module.params['pri']
+    msg_object.options['message'] = module.params['msg']
+    try:
+        msg_object.run()
+    except Exception:
+        module.fail_json(msg='Unable to send msg via pushover')
+
+    module.exit_json(msg=module.params['msg'], changed=False)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/notification/sendgrid.py b/notification/sendgrid.py
new file mode 100644
index 00000000000..d8bfb7d6a2e
--- /dev/null
+++ b/notification/sendgrid.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Makai
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+version_added: "2.0"
+module: sendgrid
+short_description: Sends an email with the SendGrid API
+description:
+  - Sends an email with a SendGrid account through their API, not through
+    the SMTP service.
+notes:
+  - This module is non-idempotent because it sends an email through the
+    external API. It is idempotent only in the case that the module fails.
+  - Like the other notification modules, this one requires an external
+    dependency to work. In this case, you'll need an active SendGrid
+    account.
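Stripped of the module plumbing, the Pushover call made by the class above amounts to the following (Python 2, matching the module's httplib/urllib usage; the function name is mine):

```python
# Minimal Pushover API call, as performed by the pushover module above.
import httplib
import urllib

def send_pushover(app_token, user_key, message, priority=0):
    params = urllib.urlencode({
        'token': app_token,     # application token issued by pushover.net
        'user': user_key,       # user (or group) key
        'message': message,
        'priority': priority,
    })
    conn = httplib.HTTPSConnection('api.pushover.net', 443)
    conn.request('POST', '/1/messages.json', params,
                 {'Content-type': 'application/x-www-form-urlencoded'})
    return conn.getresponse()  # 200 on success; the module ignores the body
```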
+options: + username: + description: + username for logging into the SendGrid account + required: true + password: + description: password that corresponds to the username + required: true + from_address: + description: + the address in the "from" field for the email + required: true + to_addresses: + description: + a list with one or more recipient email addresses + required: true + subject: + description: + the desired subject for the email + required: true + +author: Matt Makai +''' + +EXAMPLES = ''' +# send an email to a single recipient that the deployment was successful +- sendgrid: + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "ansible@mycompany.com" + to_addresses: + - "ops@mycompany.com" + subject: "Deployment success." + body: "The most recent Ansible deployment was successful." + delegate_to: localhost + +# send an email to more than one recipient that the build failed +- sendgrid + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "build@mycompany.com" + to_addresses: + - "ops@mycompany.com" + - "devteam@mycompany.com" + subject: "Build failure!." + body: "Unable to pull source repository from Git server." + delegate_to: localhost +''' + +# ======================================= +# sendgrid module support methods +# +try: + import urllib, urllib2 +except ImportError: + module.fail_json(msg="urllib and urllib2 are required") + +import base64 + +def post_sendgrid_api(module, username, password, from_address, to_addresses, + subject, body): + SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" + AGENT = "Ansible" + data = {'api_user': username, 'api_key':password, + 'from':from_address, 'subject': subject, 'text': body} + encoded_data = urllib.urlencode(data) + to_addresses_api = '' + for recipient in to_addresses: + if isinstance(recipient, unicode): + recipient = recipient.encode('utf-8') + to_addresses_api += '&to[]=%s' % recipient + encoded_data += to_addresses_api + request = urllib2.Request(SENDGRID_URI) + request.add_header('User-Agent', AGENT) + request.add_header('Content-type', 'application/x-www-form-urlencoded') + request.add_header('Accept', 'application/json') + return urllib2.urlopen(request, encoded_data) + + +# ======================================= +# Main +# + +def main(): + module = AnsibleModule( + argument_spec=dict( + username=dict(required=True), + password=dict(required=True, no_log=True), + from_address=dict(required=True), + to_addresses=dict(required=True, type='list'), + subject=dict(required=True), + body=dict(required=True), + ), + supports_check_mode=True + ) + + username = module.params['username'] + password = module.params['password'] + from_address = module.params['from_address'] + to_addresses = module.params['to_addresses'] + subject = module.params['subject'] + body = module.params['body'] + + try: + response = post_sendgrid_api(module, username, password, + from_address, to_addresses, subject, body) + except Exception: + module.fail_json(msg="unable to send email through SendGrid API") + + module.exit_json(msg=subject, changed=False) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/notification/slack.py b/notification/slack.py index 5577228978b..fc0e7403637 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -28,12 +28,20 @@ author: Ramon de la Fuente options: domain: description: - - Slack (sub)domain for your environment without protocol. - (i.e. 
C(future500.slack.com)) - required: true + - Slack (sub)domain for your environment without protocol. (i.e. + C(future500.slack.com)) In 1.8 and beyond, this is deprecated and may + be ignored. See token documentation for information. + required: false token: description: - - Slack integration token + - Slack integration token. This authenticates you to the slack service. + Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In + 1.8 and above, ansible adapts to the new slack API where tokens look + like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens + are in the new format then slack will ignore any value of domain. If + the token is in the old format the domain is required. Ansible has no + control of when slack will get rid of the old API. When slack does + that the old format will stop working. required: true msg: description: @@ -81,6 +89,17 @@ options: choices: - 'yes' - 'no' + color: + version_added: 2.0 + description: + - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message + required: false + default: 'normal' + choices: + - 'normal' + - 'good' + - 'warning' + - 'danger' """ EXAMPLES = """ @@ -103,13 +122,24 @@ EXAMPLES = """ link_names: 0 parse: 'none' +- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack + slack: + domain: future500.slack.com + token: thetokengeneratedbyslack + msg: "{{ inventory_hostname }} is alive!" + color: good + username: "" + icon_url: "" """ +OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s' -def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse): - payload = dict(text=text) - +def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color): + if color == 'normal': + payload = dict(text=text) + else: + payload = dict(attachments=[dict(text=text, color=color)]) if channel is not None: payload['channel'] = channel if (channel[0] == '#') else '#'+channel if username is not None: @@ -127,7 +157,13 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoj return payload def do_notify_slack(module, domain, token, payload): - slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (token) + if token.count('/') >= 2: + # New style token + slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (token) + else: + if not domain: + module.fail_json(msg="Slack has updated its webhook API. 
You need to specify a token of the form XXXX/YYYY/ZZZZ in your playbook")
+        slack_incoming_webhook = OLD_SLACK_INCOMING_WEBHOOK % (domain, token)
 
     response, info = fetch_url(module, slack_incoming_webhook, data=payload)
     if info['status'] != 200:
@@ -137,7 +173,7 @@ def do_notify_slack(module, domain, token, payload):
 def main():
     module = AnsibleModule(
         argument_spec = dict(
-            domain = dict(type='str', required=True),
+            domain = dict(type='str', required=False, default=None),
             token = dict(type='str', required=True),
             msg = dict(type='str', required=True),
             channel = dict(type='str', default=None),
@@ -146,8 +182,8 @@ def main():
             icon_emoji = dict(type='str', default=None),
             link_names = dict(type='int', default=1, choices=[0,1]),
             parse = dict(type='str', default=None, choices=['none', 'full']),
-            validate_certs = dict(default='yes', type='bool'),
+            color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger'])
         )
     )
@@ -160,8 +196,9 @@ def main():
     icon_emoji = module.params['icon_emoji']
     link_names = module.params['link_names']
     parse = module.params['parse']
+    color = module.params['color']
 
-    payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse)
+    payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color)
     do_notify_slack(module, domain, token, payload)
 
     module.exit_json(msg="OK")
diff --git a/notification/sns.py b/notification/sns.py
index f2ed178554e..54421b0e9fa 100644
--- a/notification/sns.py
+++ b/notification/sns.py
@@ -105,6 +105,7 @@ from ansible.module_utils.ec2 import *
 
 try:
     import boto
+    import boto.ec2
     import boto.sns
 except ImportError:
     print "failed=True msg='boto required for this module'"
diff --git a/notification/twilio.py b/notification/twilio.py
index 8969c28aa50..faae7b6f58f 100644
--- a/notification/twilio.py
+++ b/notification/twilio.py
@@ -1,7 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 
-# (c) 2014, Matt Makai
+# (c) 2015, Matt Makai
 #
 # This file is part of Ansible
 #
@@ -24,18 +24,20 @@ version_added: "1.6"
 module: twilio
 short_description: Sends a text message to a mobile phone through Twilio.
 description:
-   - Sends a text message to a phone number through an the Twilio SMS service.
+   - Sends a text message to a phone number through the Twilio messaging API.
 notes:
-   - Like the other notification modules, this one requires an external
+   - This module is non-idempotent because it sends a text message through the
+     external API. It is idempotent only in the case that the module fails.
+   - Like the other notification modules, this one requires an external
      dependency to work. In this case, you'll need a Twilio account with
      a purchased or verified phone number to send the text message.
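The token-format dispatch in the slack hunk above reduces to the following (a sketch; the constants are the module's own, the helper name is mine):

```python
# Webhook URL selection from the slack hunk above: new-style tokens
# (XXXX/YYYY/ZZZZ) map straight onto hooks.slack.com; old-style tokens
# still need the team (sub)domain.
OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'

def webhook_url(domain, token):
    if token.count('/') >= 2:
        return SLACK_INCOMING_WEBHOOK % token
    if not domain:
        raise ValueError('old-style Slack token requires the domain parameter')
    return OLD_SLACK_INCOMING_WEBHOOK % (domain, token)
```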
 options:
   account_sid:
     description:
-      user's account id for Twilio found on the account page
+      user's Twilio account SID found on the account page
     required: true
   auth_token:
-    description: user's authentication token for Twilio found on the account page
+    description: user's Twilio authentication token
     required: true
   msg:
     description:
@@ -43,36 +45,64 @@ options:
     required: true
   to_number:
     description:
-      what phone number to send the text message to, format +15551112222
+      one or more phone numbers to send the text message to,
+      format +15551112222
     required: true
   from_number:
     description:
-      what phone number to send the text message from, format +15551112222
+      the Twilio number to send the text message from, format +15551112222
     required: true
-
-requirements: [ urllib, urllib2 ]
+  media_url:
+    description:
+      a URL with a picture, video or sound clip to send with an MMS
+      (multimedia message) instead of a plain SMS
+    required: false
+
 author: Matt Makai
 '''
 
 EXAMPLES = '''
-# send a text message from the local server about the build status to (555) 303 5681
-# note: you have to have purchased the 'from_number' on your Twilio account
-- local_action: text msg="All servers with webserver role are now configured."
-    account_sid={{ twilio_account_sid }}
-    auth_token={{ twilio_auth_token }}
-    from_number=+15552014545 to_number=+15553035681
-
-# send a text message from a server to (555) 111 3232
-# note: you have to have purchased the 'from_number' on your Twilio account
-- text: msg="This server's configuration is now complete."
-    account_sid={{ twilio_account_sid }}
-    auth_token={{ twilio_auth_token }}
-    from_number=+15553258899 to_number=+15551113232
-
+# send an SMS about the build status to (555) 303 5681
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- twilio:
+    msg: "All servers with webserver role are now configured."
+    account_sid: "ACXXXXXXXXXXXXXXXXX"
+    auth_token: "ACXXXXXXXXXXXXXXXXX"
+    from_number: "+15552014545"
+    to_number: "+15553035681"
+  delegate_to: localhost
+
+# send an SMS to multiple phone numbers about the deployment
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- twilio:
+    msg: "This server's configuration is now complete."
+    account_sid: "ACXXXXXXXXXXXXXXXXX"
+    auth_token: "ACXXXXXXXXXXXXXXXXX"
+    from_number: "+15553258899"
+    to_number:
+      - "+15551113232"
+      - "+12025551235"
+      - "+19735559010"
+  delegate_to: localhost
+
+# send an MMS to a single recipient with an update on the deployment
+# and an image of the results
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- twilio:
+    msg: "Deployment complete!"
+ account_sid: "ACXXXXXXXXXXXXXXXXX" + auth_token: "ACXXXXXXXXXXXXXXXXX" + from_number: "+15552014545" + to_number: "+15553035681" + media_url: "https://demo.twilio.com/logo.png" + delegate_to: localhost ''' # ======================================= -# text module support methods +# twilio module support methods # try: import urllib, urllib2 @@ -82,19 +112,22 @@ except ImportError: import base64 -def post_text(module, account_sid, auth_token, msg, from_number, to_number): +def post_twilio_api(module, account_sid, auth_token, msg, from_number, + to_number, media_url=None): URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ % (account_sid,) - AGENT = "Ansible/1.5" + AGENT = "Ansible" data = {'From':from_number, 'To':to_number, 'Body':msg} + if media_url: + data['MediaUrl'] = media_url encoded_data = urllib.urlencode(data) request = urllib2.Request(URI) base64string = base64.encodestring('%s:%s' % \ (account_sid, auth_token)).replace('\n', '') request.add_header('User-Agent', AGENT) request.add_header('Content-type', 'application/x-www-form-urlencoded') - request.add_header('Accept', 'application/ansible') + request.add_header('Accept', 'application/json') request.add_header('Authorization', 'Basic %s' % base64string) return urllib2.urlopen(request, encoded_data) @@ -112,23 +145,29 @@ def main(): msg=dict(required=True), from_number=dict(required=True), to_number=dict(required=True), + media_url=dict(default=None, required=False), ), supports_check_mode=True ) - + account_sid = module.params['account_sid'] auth_token = module.params['auth_token'] msg = module.params['msg'] from_number = module.params['from_number'] to_number = module.params['to_number'] + media_url = module.params['media_url'] + + if not isinstance(to_number, list): + to_number = [to_number] - try: - response = post_text(module, account_sid, auth_token, msg, - from_number, to_number) - except Exception, e: - module.fail_json(msg="unable to send text message to %s" % to_number) + for number in to_number: + try: + post_twilio_api(module, account_sid, auth_token, msg, + from_number, number, media_url) + except Exception: + module.fail_json(msg="unable to send message to %s" % number) - module.exit_json(msg=msg, changed=False) + module.exit_json(msg=msg, changed=False) # import module snippets from ansible.module_utils.basic import * diff --git a/packaging/bower.py b/packaging/language/bower.py similarity index 98% rename from packaging/bower.py rename to packaging/language/bower.py index e948f687bde..085f454e639 100644 --- a/packaging/bower.py +++ b/packaging/language/bower.py @@ -24,7 +24,7 @@ module: bower short_description: Manage bower packages with bower description: - Manage bower packages with bower -version_added: 1.7 +version_added: 1.9 author: Michael Warkentin options: name: @@ -108,7 +108,7 @@ class Bower(object): return '' def list(self): - cmd = ['list', '--json'] + cmd = ['list', '--json', '--config.interactive=false', '--allow-root'] installed = list() missing = list() diff --git a/packaging/language/composer.py b/packaging/language/composer.py index 5d8ba563c8b..a24b826a4de 100644 --- a/packaging/language/composer.py +++ b/packaging/language/composer.py @@ -128,29 +128,34 @@ def main(): supports_check_mode=True ) - module.params["working_dir"] = os.path.abspath(module.params["working_dir"]) + options = [] - options = set([]) # Default options - options.add("--no-ansi") - options.add("--no-progress") - options.add("--no-interaction") + options.append('--no-ansi') + 
options.append('--no-progress') + options.append('--no-interaction') - if module.check_mode: - options.add("--dry-run") + options.extend(['--working-dir', os.path.abspath(module.params['working_dir'])]) - # Get composer command with fallback to default + # Get composer command with fallback to default command = module.params['command'] - del module.params['command']; # Prepare options - for i in module.params: - opt = "--%s" % i.replace("_","-") - p = module.params[i] - if isinstance(p, (bool)) and p: - options.add(opt) - elif isinstance(p, (str)): - options.add("%s=%s" % (opt, p)) + if module.params['prefer_source']: + options.append('--prefer-source') + if module.params['prefer_dist']: + options.append('--prefer-dist') + if module.params['no_dev']: + options.append('--no-dev') + if module.params['no_scripts']: + options.append('--no-scripts') + if module.params['no_plugins']: + options.append('--no-plugins') + if module.params['optimize_autoloader']: + options.append('--optimize-autoloader') + + if module.check_mode: + options.append('--dry-run') rc, out, err = composer_install(module, command, options) @@ -158,7 +163,8 @@ def main(): output = parse_out(err) module.fail_json(msg=output) else: - output = parse_out(out) + # Composer version > 1.0.0-alpha9 now use stderr for standard notification messages + output = parse_out(out + err) module.exit_json(changed=has_changed(output), msg=output) # import module snippets diff --git a/packaging/language/cpanm.py b/packaging/language/cpanm.py index 122fff559b6..ec344b7aa9b 100644 --- a/packaging/language/cpanm.py +++ b/packaging/language/cpanm.py @@ -53,6 +53,11 @@ options: - Specifies the base URL for the CPAN mirror to use required: false default: false + mirror_only: + description: + - Use the mirror's index file instead of the CPAN Meta DB + required: false + default: false examples: - code: "cpanm: name=Dancer" description: Install I(Dancer) perl package. @@ -82,7 +87,7 @@ def _is_package_installed(module, name, locallib, cpanm): else: return False -def _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm): +def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, cpanm): # this code should use "%s" like everything else and just return early but not fixing all of it now. 
# don't copy stuff like this if from_path: @@ -99,6 +104,9 @@ def _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm): if mirror is not None: cmd = "{cmd} --mirror {mirror}".format(cmd=cmd, mirror=mirror) + if mirror_only is True: + cmd = "{cmd} --mirror-only".format(cmd=cmd) + return cmd @@ -108,7 +116,8 @@ def main(): from_path=dict(default=None, required=False), notest=dict(default=False, type='bool'), locallib=dict(default=None, required=False), - mirror=dict(default=None, required=False) + mirror=dict(default=None, required=False), + mirror_only=dict(default=False, type='bool'), ) module = AnsibleModule( @@ -116,12 +125,13 @@ def main(): required_one_of=[['name', 'from_path']], ) - cpanm = module.get_bin_path('cpanm', True) - name = module.params['name'] - from_path = module.params['from_path'] - notest = module.boolean(module.params.get('notest', False)) - locallib = module.params['locallib'] - mirror = module.params['mirror'] + cpanm = module.get_bin_path('cpanm', True) + name = module.params['name'] + from_path = module.params['from_path'] + notest = module.boolean(module.params.get('notest', False)) + locallib = module.params['locallib'] + mirror = module.params['mirror'] + mirror_only = module.params['mirror_only'] changed = False @@ -129,7 +139,7 @@ def main(): if not installed: out_cpanm = err_cpanm = '' - cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm) + cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, cpanm) rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False) diff --git a/packaging/language/maven_artifact.py b/packaging/language/maven_artifact.py new file mode 100644 index 00000000000..2aeb158625b --- /dev/null +++ b/packaging/language/maven_artifact.py @@ -0,0 +1,363 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Chris Schmidt +# +# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact +# as a reference and starting point. +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +__author__ = 'cschmidt' + +from lxml import etree +from urllib2 import Request, urlopen, URLError, HTTPError +import os +import hashlib +import sys +import base64 + +DOCUMENTATION = ''' +--- +module: maven_artifact +short_description: Downloads an Artifact from a Maven Repository +version_added: "2.0" +description: + - Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve + - snapshots or release versions of the artifact and will resolve the latest available version if one is not + - available. 
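Before the maven_artifact documentation continues: the cpanm hunk above threads its new mirror_only flag into the command line. In isolation (a sketch mirroring the module's string building; the standalone function name is mine):

```python
# How cpanm's _build_cmd_line above appends the mirror-related flags.
def build_cmd_line(cpanm, name, mirror=None, mirror_only=False):
    cmd = "{cpanm} {name}".format(cpanm=cpanm, name=name)
    if mirror is not None:
        cmd = "{cmd} --mirror {mirror}".format(cmd=cmd, mirror=mirror)
    if mirror_only is True:
        # resolve packages from the mirror's own index instead of the CPAN Meta DB
        cmd = "{cmd} --mirror-only".format(cmd=cmd)
    return cmd
```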
+author: Chris Schmidt
+requirements:
+    - python lxml
+    - python urllib2
+options:
+    group_id:
+        description: The Maven groupId coordinate
+        required: true
+    artifact_id:
+        description: The maven artifactId coordinate
+        required: true
+    version:
+        description: The maven version coordinate
+        required: false
+        default: latest
+    classifier:
+        description: The maven classifier coordinate
+        required: false
+        default: null
+    extension:
+        description: The maven type/extension coordinate
+        required: false
+        default: jar
+    repository_url:
+        description: The URL of the Maven Repository to download from
+        required: false
+        default: http://repo1.maven.org/maven2
+    username:
+        description: The username to authenticate as to the Maven Repository
+        required: false
+        default: null
+    password:
+        description: The password to authenticate with to the Maven Repository
+        required: false
+        default: null
+    dest:
+        description: The path where the artifact should be written to
+        required: true
+        default: null
+    state:
+        description: The desired state of the artifact
+        required: true
+        default: present
+        choices: [present,absent]
+'''
+
+EXAMPLES = '''
+# Download the latest version of the commons-collections artifact from Maven Central
+- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections dest=/tmp/commons-collections-latest.jar
+
+# Download Apache Commons-Collections 3.2 from Maven Central
+- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 dest=/tmp/commons-collections-3.2.jar
+
+# Download an artifact from a private repository requiring authentication
+- maven_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass dest=/tmp/library-name-latest.jar
+
+# Download a WAR File to the Tomcat webapps directory to be deployed
+- maven_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven dest=/var/lib/tomcat7/webapps/web-app.war
+'''
+
+class Artifact(object):
+    def __init__(self, group_id, artifact_id, version, classifier=None, extension='jar'):
+        if not group_id:
+            raise ValueError("group_id must be set")
+        if not artifact_id:
+            raise ValueError("artifact_id must be set")
+
+        self.group_id = group_id
+        self.artifact_id = artifact_id
+        self.version = version
+        self.classifier = classifier
+
+        if not extension:
+            self.extension = "jar"
+        else:
+            self.extension = extension
+
+    def is_snapshot(self):
+        return self.version and self.version.endswith("SNAPSHOT")
+
+    def path(self, with_version=True):
+        base = self.group_id.replace(".", "/") + "/" + self.artifact_id
+        if with_version and self.version:
+            return base + "/" + self.version
+        else:
+            return base
+
+    def _generate_filename(self):
+        if not self.classifier:
+            return self.artifact_id + "." + self.extension
+        else:
+            return self.artifact_id + "-" + self.classifier + "." + self.extension
+
+    def get_filename(self, filename=None):
+        if not filename:
+            filename = self._generate_filename()
+        elif os.path.isdir(filename):
+            filename = os.path.join(filename, self._generate_filename())
+        return filename
+
+    def __str__(self):
+        if self.classifier:
+            return "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
+        elif self.extension != "jar":
+            return "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
+        else:
+            return "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)
+
+    @staticmethod
+    def parse(input):
+        parts = input.split(":")
+        if len(parts) >= 3:
+            g = parts[0]
+            a = parts[1]
+            v = parts[len(parts) - 1]
+            t = None
+            c = None
+            if len(parts) == 4:
+                t = parts[2]
+            if len(parts) == 5:
+                t = parts[2]
+                c = parts[3]
+            return Artifact(g, a, v, c, t)
+        else:
+            return None
+
+
+class MavenDownloader:
+    def __init__(self, base="http://repo1.maven.org/maven2", username=None, password=None):
+        if base.endswith("/"):
+            base = base.rstrip("/")
+        self.base = base
+        self.user_agent = "Maven Artifact Downloader/1.0"
+        self.username = username
+        self.password = password
+
+    def _find_latest_version_available(self, artifact):
+        path = "/%s/maven-metadata.xml" % (artifact.path(False))
+        xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
+        v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
+        if v:
+            return v[0]
+
+    def find_uri_for_artifact(self, artifact):
+        if artifact.is_snapshot():
+            path = "/%s/maven-metadata.xml" % (artifact.path())
+            xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
+            basexpath = "/metadata/versioning/"
+            p = xml.xpath(basexpath + "/snapshotVersions/snapshotVersion")
+            if p:
+                return self._find_matching_artifact(p, artifact)
+        else:
+            return self._uri_for_artifact(artifact)
+
+    def _find_matching_artifact(self, elems, artifact):
+        filtered = filter(lambda e: e.xpath("extension/text() = '%s'" % artifact.extension), elems)
+        if artifact.classifier:
+            # narrow by classifier within the extension matches
+            filtered = filter(lambda e: e.xpath("classifier/text() = '%s'" % artifact.classifier), filtered)
+
+        if len(filtered) > 1:
+            print(
+                "There was more than one match. Selecting the first one. Try adding a classifier to get a better match.")
+        elif not len(filtered):
+            print("There were no matches.")
+            return None
+
+        elem = filtered[0]
+        value = elem.xpath("value/text()")
+        return self._uri_for_artifact(artifact, value[0])
+
+    def _uri_for_artifact(self, artifact, version=None):
+        if artifact.is_snapshot() and not version:
+            raise ValueError("Expected unique version for snapshot artifact " + str(artifact))
+        elif not artifact.is_snapshot():
+            version = artifact.version
+        if artifact.classifier:
+            return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension
+
+        return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "." + artifact.extension
+
+    def _request(self, url, failmsg, f):
+        if not self.username:
+            headers = {"User-Agent": self.user_agent}
+        else:
+            headers = {
+                "User-Agent": self.user_agent,
+                "Authorization": "Basic " + base64.b64encode(self.username + ":" + self.password)
+            }
+        req = Request(url, None, headers)
+        try:
+            response = urlopen(req)
+        except HTTPError, e:
+            raise ValueError(failmsg + " because of " + str(e) + " for URL " + url)
+        except URLError, e:
+            raise ValueError(failmsg + " because of " + str(e) + " for URL " + url)
+        else:
+            return f(response)
+
+
+    def download(self, artifact, filename=None):
+        filename = artifact.get_filename(filename)
+        if not artifact.version or artifact.version == "latest":
+            artifact = Artifact(artifact.group_id, artifact.artifact_id, self._find_latest_version_available(artifact),
+                                artifact.classifier, artifact.extension)
+
+        url = self.find_uri_for_artifact(artifact)
+        if not self.verify_md5(filename, url + ".md5"):
+            response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r)
+            if response:
+                # write in binary mode so archives are not corrupted
+                with open(filename, 'wb') as f:
+                    self._write_chunks(response, f, report_hook=self.chunk_report)
+                return True
+            else:
+                return False
+        else:
+            return True
+
+    def chunk_report(self, bytes_so_far, chunk_size, total_size):
+        percent = float(bytes_so_far) / total_size
+        percent = round(percent * 100, 2)
+        sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
+                         (bytes_so_far, total_size, percent))
+
+        if bytes_so_far >= total_size:
+            sys.stdout.write('\n')
+
+    def _write_chunks(self, response, file, chunk_size=8192, report_hook=None):
+        total_size = response.info().getheader('Content-Length').strip()
+        total_size = int(total_size)
+        bytes_so_far = 0
+
+        while 1:
+            chunk = response.read(chunk_size)
+            bytes_so_far += len(chunk)
+
+            if not chunk:
+                break
+
+            file.write(chunk)
+            if report_hook:
+                report_hook(bytes_so_far, chunk_size, total_size)
+
+        return bytes_so_far
+
+    def verify_md5(self, file, remote_md5):
+        if not os.path.exists(file):
+            return False
+        else:
+            local_md5 = self._local_md5(file)
+            remote = self._request(remote_md5, "Failed to download MD5", lambda r: r.read())
+            return local_md5 == remote
+
+    def _local_md5(self, file):
+        md5 = hashlib.md5()
+        with open(file, 'rb') as f:
+            for chunk in iter(lambda: f.read(8192), ''):
+                md5.update(chunk)
+        return md5.hexdigest()
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec = dict(
+            group_id = dict(default=None),
+            artifact_id = dict(default=None),
+            version = dict(default=None),
+            classifier = dict(default=None),
+            extension = dict(default=None),
+            repository_url = dict(default=None),
+            username = dict(default=None),
+            password = dict(default=None),
+            state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state
+            dest = dict(default=None),
+        )
+    )
+
+    group_id = module.params["group_id"]
+    artifact_id = module.params["artifact_id"]
+    version = module.params["version"]
+    classifier = module.params["classifier"]
+    extension = module.params["extension"]
+    repository_url = module.params["repository_url"]
+    repository_username = module.params["username"]
+    repository_password = module.params["password"]
+    state = module.params["state"]
+    dest = module.params["dest"]
+
+    if not repository_url:
+        repository_url = "http://repo1.maven.org/maven2"
+
+    downloader = MavenDownloader(repository_url, repository_username, repository_password)
+
+    try:
+        artifact = Artifact(group_id, artifact_id, version, classifier, 
extension) + except ValueError as e: + module.fail_json(msg=e.args[0]) + + prev_state = "absent" + if os.path.isdir(dest): + dest = dest + "/" + artifact_id + "-" + version + "." + extension + if os.path.lexists(dest): + prev_state = "present" + else: + path = os.path.dirname(dest) + if not os.path.exists(path): + os.makedirs(path) + + if prev_state == "present": + module.exit_json(dest=dest, state=state, changed=False) + + try: + if downloader.download(artifact, dest): + module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True) + else: + module.fail_json(msg="Unable to download the artifact") + except ValueError as e: + module.fail_json(msg=e.args[0]) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() diff --git a/packaging/language/npm.py b/packaging/language/npm.py index 1dd2e998492..8407589116a 100644 --- a/packaging/language/npm.py +++ b/packaging/language/npm.py @@ -149,6 +149,7 @@ class Npm(object): #If path is specified, cd into that path and run the command. cwd = None if self.path: + self.path = os.path.abspath(os.path.expanduser(self.path)) if not os.path.exists(self.path): os.makedirs(self.path) if not os.path.isdir(self.path): diff --git a/packaging/dnf.py b/packaging/os/dnf.py similarity index 99% rename from packaging/dnf.py rename to packaging/os/dnf.py index 2ce8cb6ab2a..222fe4fa222 100644 --- a/packaging/dnf.py +++ b/packaging/os/dnf.py @@ -34,7 +34,7 @@ except: DOCUMENTATION = ''' --- module: dnf -version_added: historical +version_added: 1.9 short_description: Manages packages with the I(dnf) package manager description: - Installs, upgrade, removes, and lists packages and groups with the I(dnf) package manager. @@ -43,21 +43,18 @@ options: description: - "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: dnf -y update. You can also pass a url or a local path to a rpm file." required: true - version_added: "1.8" default: null aliases: [] list: description: - Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples. required: false - version_added: "1.8" default: null state: description: - Whether to install (C(present), C(latest)), or remove (C(absent)) a package. required: false choices: [ "present", "latest", "absent" ] - version_added: "1.8" default: "present" enablerepo: description: @@ -65,17 +62,15 @@ options: These repos will not persist beyond the transaction. When specifying multiple repos, separate them with a ",". required: false - version_added: "1.8" default: null aliases: [] - + disablerepo: description: - I(Repoid) of repositories to disable for the install/update operation. These repos will not persist beyond the transaction. When specifying multiple repos, separate them with a ",". required: false - version_added: "1.8" default: null aliases: [] @@ -83,7 +78,6 @@ options: description: - The remote dnf configuration file to use for the transaction. required: false - version_added: "1.8" default: null aliases: [] @@ -92,7 +86,6 @@ options: - Whether to disable the GPG checking of signatures of packages being installed. Has an effect only if state is I(present) or I(latest). 
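Looking back at the maven_artifact downloader earlier in this change: its download() skips work when the local file already matches the repository checksum. In outline (a Python 2 sketch; the helper names here are mine):

```python
# Outline of maven_artifact's skip-if-unchanged check: hash the local
# file and compare it with the repository's .md5 sidecar before
# downloading again.
import hashlib
import os

def local_md5(path, chunk_size=8192):
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), ''):  # '' sentinel is Python 2
            md5.update(chunk)
    return md5.hexdigest()

def needs_download(path, remote_md5_hex):
    return not os.path.exists(path) or local_md5(path) != remote_md5_hex
```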
required: false - version_added: "1.8" default: "no" choices: ["yes", "no"] aliases: [] diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index 2ecac0c4ace..aac4efd827e 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -31,7 +31,8 @@ options: name: description: - name of package to install/remove - required: true + required: false + default: None state: description: - state of the package @@ -48,7 +49,7 @@ options: description: - upgrade all homebrew packages required: false - default: no + default: "no" choices: [ "yes", "no" ] install_options: description: diff --git a/packaging/os/homebrew_cask.py b/packaging/os/homebrew_cask.py index dede8d4bb36..75acead517b 100644 --- a/packaging/os/homebrew_cask.py +++ b/packaging/os/homebrew_cask.py @@ -32,7 +32,7 @@ options: state: description: - state of the cask - choices: [ 'installed', 'uninstalled' ] + choices: [ 'present', 'absent' ] required: false default: present ''' diff --git a/packaging/os/homebrew_tap.py b/packaging/os/homebrew_tap.py index a79ba076a8a..d329227b980 100644 --- a/packaging/os/homebrew_tap.py +++ b/packaging/os/homebrew_tap.py @@ -52,7 +52,7 @@ homebrew_tap: tap=homebrew/dupes,homebrew/science state=present def a_valid_tap(tap): '''Returns True if the tap is valid.''' - regex = re.compile(r'^(\S+)/(homebrew-)?(\w+)$') + regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$') return regex.match(tap) diff --git a/packaging/os/pacman.py b/packaging/os/pacman.py index 0b23a2f93ce..a91f8e3054d 100644 --- a/packaging/os/pacman.py +++ b/packaging/os/pacman.py @@ -42,7 +42,7 @@ options: - Desired state of the package. required: false default: "present" - choices: ["present", "absent"] + choices: ["present", "absent", "latest"] recurse: description: @@ -67,6 +67,9 @@ EXAMPLES = ''' # Install package foo - pacman: name=foo state=present +# Upgrade package foo +- pacman: name=foo state=latest update_cache=yes + # Remove packages foo and bar - pacman: name=foo,bar state=absent @@ -85,17 +88,37 @@ import sys PACMAN_PATH = "/usr/bin/pacman" +def get_version(pacman_output): + """Take pacman -Qi or pacman -Si output and get the Version""" + lines = pacman_output.split('\n') + for line in lines: + if 'Version' in line: + return line.split(':')[1].strip() + return None + def query_package(module, name, state="present"): - # pacman -Q returns 0 if the package is installed, - # 1 if it is not installed + """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, and a second boolean to indicate if the package is up-to-date.""" if state == "present": - cmd = "pacman -Q %s" % (name) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc == 0: - return True - - return False + lcmd = "pacman -Qi %s" % (name) + lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) + if lrc != 0: + # package is not installed locally + return False, False + + # get the version installed locally (if any) + lversion = get_version(lstdout) + + rcmd = "pacman -Si %s" % (name) + rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) + # get the version in the repository + rversion = get_version(rstdout) + + if rrc == 0: + # Return True to indicate that the package is installed locally, and the result of the version number comparison + # to determine if the package is up-to-date. 
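+        # Illustration: lversion '4.2.2-1' vs rversion '4.2.3-1' gives
+        # (True, False): the package is installed but stale, so state=latest
+        # will upgrade it while state=present leaves it alone.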
+ return True, (lversion == rversion) + + return False, False def update_package_db(module): @@ -118,7 +141,8 @@ def remove_packages(module, packages): # Using a for loop incase of error, we can report the package that failed for package in packages: # Query the package first, to see if we even need to remove - if not query_package(module, package): + installed, updated = query_package(module, package) + if not installed: continue cmd = "pacman -%s %s --noconfirm" % (args, package) @@ -136,11 +160,13 @@ def remove_packages(module, packages): module.exit_json(changed=False, msg="package(s) already absent") -def install_packages(module, packages, package_files): +def install_packages(module, state, packages, package_files): install_c = 0 for i, package in enumerate(packages): - if query_package(module, package): + # if the package is installed and state == present or state == latest and is up-to-date then skip + installed, updated = query_package(module, package) + if installed and (state == 'present' or (state == 'latest' and updated)): continue if package_files[i]: @@ -165,9 +191,10 @@ def install_packages(module, packages, package_files): def check_packages(module, packages, state): would_be_changed = [] for package in packages: - installed = query_package(module, package) - if ((state == "present" and not installed) or - (state == "absent" and installed)): + installed, updated = query_package(module, package) + if ((state in ["present", "latest"] and not installed) or + (state == "absent" and installed) or + (state == "latest" and not updated)): would_be_changed.append(package) if would_be_changed: if state == "absent": @@ -182,7 +209,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(aliases=['pkg']), - state = dict(default='present', choices=['present', 'installed', 'absent', 'removed']), + state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']), recurse = dict(default='no', choices=BOOLEANS, type='bool'), update_cache = dict(default='no', aliases=['update-cache'], choices=BOOLEANS, type='bool')), required_one_of = [['name', 'update_cache']], @@ -223,8 +250,8 @@ def main(): if module.check_mode: check_packages(module, pkgs, p['state']) - if p['state'] == 'present': - install_packages(module, pkgs, pkg_files) + if p['state'] in ['present', 'latest']: + install_packages(module, p['state'], pkgs, pkg_files) elif p['state'] == 'absent': remove_packages(module, pkgs) diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py new file mode 100644 index 00000000000..b250a02850c --- /dev/null +++ b/packaging/os/pkg5.py @@ -0,0 +1,163 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2014 Peter Oliver +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
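Editor's aside: the main() of the new pkg5 module below re-joins FMRIs (IPS
package identifiers) that Ansible's comma-splitting of list arguments has
broken apart, since an FMRI such as 'web/browser@1.2,5.11-0.175' legitimately
contains a comma. A minimal standalone sketch of that re-join logic follows;
the function name rejoin_fmris and the sample package list are invented for
illustration.

import re

def rejoin_fmris(fragments):
    # A fragment that starts like a bare version number and follows a spec
    # that already carries an "@version" tail is really the continuation of
    # that spec, so glue it back on with the lost comma.
    packages = []
    for fragment in fragments:
        if (re.search('^\d+(?:\.\d+)*', fragment)
                and packages and re.search('@[^,]*$', packages[-1])):
            packages[-1] += ',' + fragment
        else:
            packages.append(fragment)
    return packages

# Ansible hands the module ['web/browser@1.2', '5.11-0.175'] for the single
# FMRI 'web/browser@1.2,5.11-0.175'; the sketch restores it:
print(rejoin_fmris(['editor/vim', 'web/browser@1.2', '5.11-0.175']))
# -> ['editor/vim', 'web/browser@1.2,5.11-0.175']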
+ +DOCUMENTATION = ''' +--- +module: pkg5 +author: Peter Oliver +short_description: Manages packages with the Solaris 11 Image Packaging System +version_added: 1.9 +description: + - IPS packages are the native packages in Solaris 11 and higher. +notes: + - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html). +options: + name: + description: + - An FRMI of the package(s) to be installed/removed/updated. + - Multiple packages may be specified, separated by C(,). + required: true + state: + description: + - Whether to install (I(present), I(latest)), or remove (I(absent)) a + package. + required: false + default: present + choices: [ present, latest, absent ] + accept_licenses: + description: + - Accept any licences. + required: false + default: false + choices: [ true, false ] + aliases: [ accept_licences, accept ] +''' +EXAMPLES = ''' +# Install Vim: +- pkg5: name=editor/vim + +# Remove finger daemon: +- pkg5: name=service/network/finger state=absent + +# Install several packages at once: +- pkg5: + name: + - /file/gnu-findutils + - /text/gnu-grep +''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, type='list'), + state=dict( + default='present', + choices=[ + 'present', + 'installed', + 'latest', + 'absent', + 'uninstalled', + 'removed', + ] + ), + accept_licenses=dict( + choices=BOOLEANS, + default=False, + aliases=['accept_licences', 'accept'], + ), + ) + ) + + params = module.params + packages = [] + + # pkg(5) FRMIs include a comma before the release number, but + # AnsibleModule will have split this into multiple items for us. + # Try to spot where this has happened and fix it. + for fragment in params['name']: + if ( + re.search('^\d+(?:\.\d+)*', fragment) + and packages and re.search('@[^,]*$', packages[-1]) + ): + packages[-1] += ',' + fragment + else: + packages.append(fragment) + + if params['state'] in ['present', 'installed']: + ensure(module, 'present', packages, params) + elif params['state'] in ['latest']: + ensure(module, 'latest', packages, params) + elif params['state'] in ['absent', 'uninstalled', 'removed']: + ensure(module, 'absent', packages, params) + + +def ensure(module, state, packages, params): + response = { + 'results': [], + 'msg': '', + } + behaviour = { + 'present': { + 'filter': lambda p: not is_installed(module, p), + 'subcommand': 'install', + }, + 'latest': { + 'filter': lambda p: not is_latest(module, p), + 'subcommand': 'install', + }, + 'absent': { + 'filter': lambda p: is_installed(module, p), + 'subcommand': 'uninstall', + }, + } + + to_modify = filter(behaviour[state]['filter'], packages) + if to_modify: + rc, out, err = module.run_command( + [ + 'pkg', behaviour[state]['subcommand'] + ] + + (['--accept'] if params['accept_licenses'] else []) + + [ + '-q', '--' + ] + to_modify + ) + response['rc'] = rc + response['results'].append(out) + response['msg'] += err + response['changed'] = True + if rc != 0: + module.fail_json(**response) + + module.exit_json(**response) + + +def is_installed(module, package): + rc, out, err = module.run_command(['pkg', 'list', '--', package]) + return True if rc == 0 else False + + +def is_latest(module, package): + rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package]) + return True if rc == 1 else False + + +from ansible.module_utils.basic import * +main() diff --git a/packaging/os/pkg5_publisher.py b/packaging/os/pkg5_publisher.py new file mode 100644 index 
00000000000..63c62059203
--- /dev/null
+++ b/packaging/os/pkg5_publisher.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Peter Oliver
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: pkg5_publisher
+author: Peter Oliver
+short_description: Manages Solaris 11 Image Packaging System publishers
+version_added: 1.9
+description:
+  - IPS packages are the native packages in Solaris 11 and higher.
+  - This module configures which publishers a client will download IPS
+    packages from.
+options:
+  name:
+    description:
+      - The publisher's name.
+    required: true
+    aliases: [ publisher ]
+  state:
+    description:
+      - Whether to ensure that a publisher is present or absent.
+    required: false
+    default: present
+    choices: [ present, absent ]
+  sticky:
+    description:
+      - Packages installed from a sticky repository can only receive updates
+        from that repository.
+    required: false
+    default: null
+    choices: [ true, false ]
+  enabled:
+    description:
+      - Is the repository enabled or disabled?
+    required: false
+    default: null
+    choices: [ true, false ]
+  origin:
+    description:
+      - A path or URL to the repository.
+      - Multiple values may be provided.
+    required: false
+    default: null
+  mirror:
+    description:
+      - A path or URL to the repository mirror.
+      - Multiple values may be provided.
+ required: false + default: null +''' +EXAMPLES = ''' +# Fetch packages for the solaris publisher direct from Oracle: +- pkg5_publisher: name=solaris sticky=true origin=https://pkg.oracle.com/solaris/support/ + +# Configure a publisher for locally-produced packages: +- pkg5_publisher: name=site origin=https://pkg.example.com/site/ +''' + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['publisher']), + state=dict(default='present', choices=['present', 'absent']), + sticky=dict(choices=BOOLEANS), + enabled=dict(choices=BOOLEANS), + # search_after=dict(), + # search_before=dict(), + origin=dict(type='list'), + mirror=dict(type='list'), + ) + ) + + for option in ['origin', 'mirror']: + if module.params[option] == ['']: + module.params[option] = [] + + if module.params['state'] == 'present': + modify_publisher(module, module.params) + else: + unset_publisher(module, module.params['name']) + + +def modify_publisher(module, params): + name = params['name'] + existing = get_publishers(module) + + if name in existing: + for option in ['origin', 'mirror', 'sticky', 'enabled']: + if params[option] != None: + if params[option] != existing[name][option]: + return set_publisher(module, params) + else: + return set_publisher(module, params) + + module.exit_json() + + +def set_publisher(module, params): + name = params['name'] + args = [] + + if params['origin'] != None: + args.append('--remove-origin=*') + args.extend(['--add-origin=' + u for u in params['origin']]) + if params['mirror'] != None: + args.append('--remove-mirror=*') + args.extend(['--add-mirror=' + u for u in params['mirror']]) + + if params['sticky'] != None: + args.append('--sticky' if params['sticky'] else '--non-sticky') + if params['enabled'] != None: + args.append('--enable' if params['enabled'] else '--disable') + + rc, out, err = module.run_command( + ["pkg", "set-publisher"] + args + [name], + check_rc=True + ) + response = { + 'rc': rc, + 'results': [out], + 'msg': err, + 'changed': True, + } + module.exit_json(**response) + + +def unset_publisher(module, publisher): + if not publisher in get_publishers(module): + module.exit_json() + + rc, out, err = module.run_command( + ["pkg", "unset-publisher", publisher], + check_rc=True + ) + response = { + 'rc': rc, + 'results': [out], + 'msg': err, + 'changed': True, + } + module.exit_json(**response) + + +def get_publishers(module): + rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True) + + lines = out.splitlines() + keys = lines.pop(0).lower().split("\t") + + publishers = {} + for line in lines: + values = dict(zip(keys, map(unstringify, line.split("\t")))) + name = values['publisher'] + + if not name in publishers: + publishers[name] = dict( + (k, values[k]) for k in ['sticky', 'enabled'] + ) + publishers[name]['origin'] = [] + publishers[name]['mirror'] = [] + + publishers[name][values['type']].append(values['uri']) + + return publishers + + +def unstringify(val): + if val == "-": + return None + elif val == "true": + return True + elif val == "false": + return False + else: + return val + + +from ansible.module_utils.basic import * +main() diff --git a/packaging/os/pkgin.py b/packaging/os/pkgin.py old mode 100755 new mode 100644 index 866c9f76a4c..f4c203e56e0 --- a/packaging/os/pkgin.py +++ b/packaging/os/pkgin.py @@ -1,8 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2013, Shaun Zinck -# Written by Shaun Zinck +# Copyright (c) 2013 Shaun Zinck +# Copyright (c) 2015 Lawrence Leonard Gilbert +# +# Written 
by Shaun Zinck # Based on pacman module written by Afterburn # that was based on apt module written by Matthew Williams # @@ -23,27 +25,32 @@ DOCUMENTATION = ''' --- module: pkgin -short_description: Package manager for SmartOS +short_description: Package manager for SmartOS, NetBSD, et al. description: - - Manages SmartOS packages + - "The standard package manager for SmartOS, but also usable on NetBSD + or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))" version_added: "1.0" +author: Shaun Zinck, Larry Gilbert +notes: + - "Known bug with pkgin < 0.8.0: if a package is removed and another + package depends on it, the other package will be silently removed as + well. New to Ansible 1.9: check-mode support." options: name: description: - - name of package to install/remove + - Name of package to install/remove; + - multiple names may be given, separated by commas required: true state: description: - - state of the package + - Intended state of the package choices: [ 'present', 'absent' ] required: false default: present -author: Shaun Zinck -notes: [] ''' EXAMPLES = ''' -# install package foo" +# install package foo - pkgin: name=foo state=present # remove package foo @@ -60,64 +67,97 @@ import os import sys import pipes -def query_package(module, pkgin_path, name, state="present"): +def query_package(module, pkgin_path, name): + """Search for the package by name. + + Possible return values: + * "present" - installed, no upgrade needed + * "outdated" - installed, but can be upgraded + * False - not installed or not found + """ + + # Use "pkgin search" to find the package. The regular expression will + # only match on the complete name. + rc, out, err = module.run_command("%s search \"^%s$\"" % (pkgin_path, name)) + + # rc will not be 0 unless the search was a success + if rc == 0: + + # Get first line + line = out.split('\n')[0] + + # Break up line at spaces. The first part will be the package with its + # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state + # of the package: + # '' - not installed + # '<' - installed but out of date + # '=' - installed and up to date + # '>' - installed but newer than the repository version + pkgname_with_version, raw_state = out.split(' ')[0:2] - if state == "present": + # Strip version + # (results in sth like 'gcc47-libs') + pkgname_without_version = '-'.join(pkgname_with_version.split('-')[:-1]) - rc, out, err = module.run_command("%s -y list | grep ^%s" % (pipes.quote(pkgin_path), pipes.quote(name)), use_unsafe_shell=True) + if name != pkgname_without_version: + return False + # no fall-through - if rc == 0: - # At least one package with a package name that starts with ``name`` - # is installed. For some cases this is not sufficient to determine - # wether the queried package is installed. - # - # E.g. for ``name='gcc47'``, ``gcc47`` not being installed, but - # ``gcc47-libs`` being installed, ``out`` would be: - # - # gcc47-libs-4.7.2nb4 The GNU Compiler Collection (GCC) support shared libraries. - # - # Multiline output is also possible, for example with the same query - # and bot ``gcc47`` and ``gcc47-libs`` being installed: - # - # gcc47-libs-4.7.2nb4 The GNU Compiler Collection (GCC) support shared libraries. 
- # gcc47-4.7.2nb3 The GNU Compiler Collection (GCC) - 4.7 Release Series + # The package was found; now return its state + if raw_state == '<': + return 'outdated' + elif raw_state == '=' or raw_state == '>': + return 'present' + else: + return False - # Loop over lines in ``out`` - for line in out.split('\n'): - # Strip description - # (results in sth. like 'gcc47-libs-4.7.2nb4') - pkgname_with_version = out.split(' ')[0] +def format_action_message(module, action, count): + vars = { "actioned": action, + "count": count } - # Strip version - # (results in sth like 'gcc47-libs') - pkgname_without_version = '-'.join(pkgname_with_version.split('-')[:-1]) + if module.check_mode: + message = "would have %(actioned)s %(count)d package" % vars + else: + message = "%(actioned)s %(count)d package" % vars - if name == pkgname_without_version: - return True + if count == 1: + return message + else: + return message + "s" - return False + +def format_pkgin_command(module, pkgin_path, command, package): + vars = { "pkgin": pkgin_path, + "command": command, + "package": package } + + if module.check_mode: + return "%(pkgin)s -n %(command)s %(package)s" % vars + else: + return "%(pkgin)s -y %(command)s %(package)s" % vars def remove_packages(module, pkgin_path, packages): remove_c = 0 + # Using a for loop incase of error, we can report the package that failed for package in packages: # Query the package first, to see if we even need to remove if not query_package(module, pkgin_path, package): continue - rc, out, err = module.run_command("%s -y remove %s" % (pkgin_path, package)) + rc, out, err = module.run_command( + format_pkgin_command(module, pkgin_path, "remove", package)) - if query_package(module, pkgin_path, package): + if not module.check_mode and query_package(module, pkgin_path, package): module.fail_json(msg="failed to remove %s: %s" % (package, out)) remove_c += 1 if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c)) module.exit_json(changed=False, msg="package(s) already absent") @@ -130,15 +170,16 @@ def install_packages(module, pkgin_path, packages): if query_package(module, pkgin_path, package): continue - rc, out, err = module.run_command("%s -y install %s" % (pkgin_path, package)) + rc, out, err = module.run_command( + format_pkgin_command(module, pkgin_path, "install", package)) - if not query_package(module, pkgin_path, package): + if not module.check_mode and not query_package(module, pkgin_path, package): module.fail_json(msg="failed to install %s: %s" % (package, out)) install_c += 1 if install_c > 0: - module.exit_json(changed=True, msg="present %s package(s)" % (install_c)) + module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c)) module.exit_json(changed=False, msg="package(s) already present") @@ -148,7 +189,8 @@ def main(): module = AnsibleModule( argument_spec = dict( state = dict(default="present", choices=["present","absent"]), - name = dict(aliases=["pkg"], required=True))) + name = dict(aliases=["pkg"], required=True)), + supports_check_mode = True) pkgin_path = module.get_bin_path('pkgin', True, ['/opt/local/bin']) diff --git a/packaging/os/pkgng.py b/packaging/os/pkgng.py index a1f443fd4e1..1aa8e0c737f 100644 --- a/packaging/os/pkgng.py +++ b/packaging/os/pkgng.py @@ -149,6 +149,9 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite): else: pkgsite = "-r %s" % (pkgsite) + batch_var = 'env 
BATCH=yes' # This environment variable skips mid-install prompts, + # setting them to their default values. + if not module.check_mode and not cached: if old_pkgng: rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path)) @@ -163,9 +166,9 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite): if not module.check_mode: if old_pkgng: - rc, out, err = module.run_command("%s %s install -g -U -y %s" % (pkgsite, pkgng_path, package)) + rc, out, err = module.run_command("%s %s %s install -g -U -y %s" % (batch_var, pkgsite, pkgng_path, package)) else: - rc, out, err = module.run_command("%s install %s -g -U -y %s" % (pkgng_path, pkgsite, package)) + rc, out, err = module.run_command("%s %s install %s -g -U -y %s" % (batch_var, pkgng_path, pkgsite, package)) if not module.check_mode and not query_package(module, pkgng_path, package): module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err) diff --git a/packaging/os/pkgutil.py b/packaging/os/pkgutil.py index 78a7db72bf5..635617b4efe 100644 --- a/packaging/os/pkgutil.py +++ b/packaging/os/pkgutil.py @@ -163,9 +163,13 @@ def main(): out = out[:75] if rc is None: + # pkgutil was not executed because the package was already present/absent result['changed'] = False - else: + elif rc == 0: result['changed'] = True + else: + result['changed'] = False + result['failed'] = True if out: result['stdout'] = out diff --git a/packaging/os/portage.py b/packaging/os/portage.py index 85027bfc79b..eb77baa14f6 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -132,8 +132,22 @@ options: default: null choices: [ "yes", "web" ] + getbinpkg: + description: + - Prefer packages specified at PORTAGE_BINHOST in make.conf + required: false + default: null + choices: [ "yes" ] + + usepkgonly: + description: + - Merge only binaries (no compiling). This sets getbinpkg=yes. 
+    required: false
+    default: null
+    choices: [ "yes" ]
+
 requirements: [ gentoolkit ]
-author: Yap Sok Ann
+author: Yap Sok Ann, Andrew Udvare
 notes: []
 '''
@@ -147,6 +161,12 @@ EXAMPLES = '''
 # Update package foo to the "best" version
 - portage: package=foo update=yes
 
+# Install package foo using PORTAGE_BINHOST setup
+- portage: package=foo getbinpkg=yes
+
+# Re-install world from binary packages only and do not allow any compiling
+- portage: package=@world usepkgonly=yes
+
 # Sync repositories and update world
 - portage: package=@world update=yes deep=yes sync=yes
@@ -160,6 +180,7 @@ EXAMPLES = '''
 
 import os
 import pipes
+import re
 
 def query_package(module, package, action):
@@ -210,7 +231,7 @@ def sync_repositories(module, webrsync=False):
         webrsync_path = module.get_bin_path('emerge-webrsync', required=True)
         cmd = '%s --quiet' % webrsync_path
     else:
-        cmd = '%s --sync --quiet' % module.emerge_path
+        cmd = '%s --sync --quiet --ask=n' % module.emerge_path
 
     rc, out, err = module.run_command(cmd)
     if rc != 0:
@@ -244,11 +265,17 @@ def emerge_packages(module, packages):
         'onlydeps': '--onlydeps',
         'quiet': '--quiet',
         'verbose': '--verbose',
+        'getbinpkg': '--getbinpkg',
+        'usepkgonly': '--usepkgonly',
     }
 
     for flag, arg in emerge_flags.iteritems():
         if p[flag]:
             args.append(arg)
 
+    # usepkgonly implies getbinpkg
+    if p['usepkgonly'] and not p['getbinpkg']:
+        args.append('--getbinpkg')
+
     cmd, (rc, out, err) = run_emerge(module, packages, *args)
     if rc != 0:
         module.fail_json(
@@ -256,9 +283,19 @@ def emerge_packages(module, packages):
             msg='Packages not installed.',
         )
 
+    # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite
+    # this error
+    if (p['usepkgonly'] or p['getbinpkg']) \
+            and 'Permission denied (publickey).' in err:
+        module.fail_json(
+            cmd=cmd, rc=rc, stdout=out, stderr=err,
+            msg='Please check your PORTAGE_BINHOST configuration in make.conf '
+                'and your SSH authorized_keys file',
+        )
+
     changed = True
     for line in out.splitlines():
-        if line.startswith('>>> Emerging (1 of'):
+        if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line):
             break
     else:
         changed = False
@@ -335,6 +372,7 @@ def cleanup_packages(module, packages):
 
 def run_emerge(module, packages, *args):
     args = list(args)
+    args.append('--ask=n')
 
     if module.check_mode:
         args.append('--pretend')
@@ -366,6 +404,8 @@ def main():
             quiet=dict(default=None, choices=['yes']),
             verbose=dict(default=None, choices=['yes']),
             sync=dict(default=None, choices=['yes', 'web']),
+            getbinpkg=dict(default=None, choices=['yes']),
+            usepkgonly=dict(default=None, choices=['yes']),
         ),
         required_one_of=[['package', 'sync', 'depclean']],
         mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']],
diff --git a/packaging/os/urpmi.py b/packaging/os/urpmi.py
index a42ee7b87fc..320d17bfc00 100644
--- a/packaging/os/urpmi.py
+++ b/packaging/os/urpmi.py
@@ -52,7 +52,8 @@ options:
     choices: [ "yes", "no" ]
   force:
     description:
-      - Corresponds to the C(--force) option for I(urpmi).
+      - Assume "yes" is the answer to any question urpmi has to ask.
+        Corresponds to the C(--force) option for I(urpmi).
     required: false
     default: yes
     choices: [ "yes", "no" ]
diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py
index 196a7e2782e..ccf901d4fa1 100644
--- a/packaging/os/zypper.py
+++ b/packaging/os/zypper.py
@@ -50,6 +50,13 @@ options:
     required: false
     choices: [ present, latest, absent ]
     default: "present"
+  type:
+    description:
+      - The type of package to be operated on.
+ required: false + choices: [ package, patch, pattern, product, srcpackage ] + default: "package" + version_added: "2.0" disable_gpg_check: description: - Whether to disable to GPG signature checking of the package @@ -95,25 +102,31 @@ def zypper_version(module): return rc, stderr # Function used for getting versions of currently installed packages. -def get_current_version(m, name): +def get_current_version(m, packages): cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n'] - cmd.extend(name) - (rc, stdout, stderr) = m.run_command(cmd) + cmd.extend(packages) + + rc, stdout, stderr = m.run_command(cmd, check_rc=False) current_version = {} rpmoutput_re = re.compile('^(\S+) (\S+)$') - for stdoutline, package in zip(stdout.splitlines(), name): - m = rpmoutput_re.match(stdoutline) - if m == None: + + for stdoutline in stdout.splitlines(): + match = rpmoutput_re.match(stdoutline) + if match == None: return None - rpmpackage = m.group(1) - rpmversion = m.group(2) - if package != rpmpackage: + package = match.group(1) + version = match.group(2) + current_version[package] = version + + for package in packages: + if package not in current_version: + print package + ' was not returned by rpm \n' return None - current_version[package] = rpmversion return current_version + # Function used to find out if a package is currently installed. def get_package_state(m, packages): cmd = ['/bin/rpm', '--query', '--qf', 'package %{NAME} is installed\n'] @@ -123,24 +136,26 @@ def get_package_state(m, packages): installed_state = {} rpmoutput_re = re.compile('^package (\S+) (.*)$') - for stdoutline, name in zip(stdout.splitlines(), packages): - m = rpmoutput_re.match(stdoutline) - if m == None: - return None - package = m.group(1) - result = m.group(2) - if not name.startswith(package): - print name + ':' + package + ':' + stdoutline + '\n' + for stdoutline in stdout.splitlines(): + match = rpmoutput_re.match(stdoutline) + if match == None: return None + package = match.group(1) + result = match.group(2) if result == 'is installed': - installed_state[name] = True + installed_state[package] = True else: - installed_state[name] = False + installed_state[package] = False + + for package in packages: + if package not in installed_state: + print package + ' was not returned by rpm \n' + return None return installed_state # Function used to make sure a package is present. -def package_present(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper): +def package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper): packages = [] for package in name: if installed_state[package] is False: @@ -150,7 +165,7 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recomme # add global options before zypper command if disable_gpg_check: cmd.append('--no-gpg-checks') - cmd.extend(['install', '--auto-agree-with-licenses']) + cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type]) # add install parameter if disable_recommends and not old_zypper: cmd.append('--no-recommends') @@ -170,10 +185,10 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recomme return (rc, stdout, stderr, changed) # Function used to make sure a package is the latest available version. 
-def package_latest(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper): +def package_latest(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper): # first of all, make sure all the packages are installed - (rc, stdout, stderr, changed) = package_present(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper) + (rc, stdout, stderr, changed) = package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper) # if we've already made a change, we don't have to check whether a version changed if not changed: @@ -185,9 +200,9 @@ def package_latest(m, name, installed_state, disable_gpg_check, disable_recommen cmd.append('--no-gpg-checks') if old_zypper: - cmd.extend(['install', '--auto-agree-with-licenses']) + cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type]) else: - cmd.extend(['update', '--auto-agree-with-licenses']) + cmd.extend(['update', '--auto-agree-with-licenses', '-t', package_type]) cmd.extend(name) rc, stdout, stderr = m.run_command(cmd, check_rc=False) @@ -201,13 +216,13 @@ def package_latest(m, name, installed_state, disable_gpg_check, disable_recommen return (rc, stdout, stderr, changed) # Function used to make sure a package is not installed. -def package_absent(m, name, installed_state, old_zypper): +def package_absent(m, name, installed_state, package_type, old_zypper): packages = [] for package in name: if installed_state[package] is True: packages.append(package) if len(packages) != 0: - cmd = ['/usr/bin/zypper', '--non-interactive', 'remove'] + cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', '-t', package_type] cmd.extend(packages) rc, stdout, stderr = m.run_command(cmd) @@ -231,6 +246,7 @@ def main(): argument_spec = dict( name = dict(required=True, aliases=['pkg'], type='list'), state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), + type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage']), disable_gpg_check = dict(required=False, default='no', type='bool'), disable_recommends = dict(required=False, default='yes', type='bool'), ), @@ -242,6 +258,7 @@ def main(): name = params['name'] state = params['state'] + type_ = params['type'] disable_gpg_check = params['disable_gpg_check'] disable_recommends = params['disable_recommends'] @@ -264,11 +281,11 @@ def main(): # Perform requested action if state in ['installed', 'present']: - (rc, stdout, stderr, changed) = package_present(module, name, installed_state, disable_gpg_check, disable_recommends, old_zypper) + (rc, stdout, stderr, changed) = package_present(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper) elif state in ['absent', 'removed']: - (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, old_zypper) + (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, type_, old_zypper) elif state == 'latest': - (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, disable_gpg_check, disable_recommends, old_zypper) + (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper) if rc != 0: if stderr: diff --git a/packaging/os/zypper_repository.py b/packaging/os/zypper_repository.py index 5e41683734b..f208305fe60 100644 --- a/packaging/os/zypper_repository.py +++ 
b/packaging/os/zypper_repository.py @@ -2,6 +2,7 @@ # encoding: utf-8 # (c) 2013, Matthias Vogelgesang +# (c) 2014, Justin Lecher # # This file is part of Ansible # @@ -51,13 +52,20 @@ options: - A description of the repository disable_gpg_check: description: - - Whether to disable GPG signature checking of - all packages. Has an effect only if state is - I(present). + - Whether to disable GPG signature checking of + all packages. Has an effect only if state is + I(present). required: false default: "no" choices: [ "yes", "no" ] aliases: [] + refresh: + description: + - Enable autorefresh of the repository. + required: false + default: "yes" + choices: [ "yes", "no" ] + aliases: [] notes: [] requirements: [ zypper ] ''' @@ -145,11 +153,11 @@ def repo_exists(module, old_zypper, **kwargs): return False -def add_repo(module, repo, alias, description, disable_gpg_check, old_zypper): +def add_repo(module, repo, alias, description, disable_gpg_check, old_zypper, refresh): if old_zypper: cmd = ['/usr/bin/zypper', 'sa'] else: - cmd = ['/usr/bin/zypper', 'ar', '--check', '--refresh'] + cmd = ['/usr/bin/zypper', 'ar', '--check'] if repo.startswith("file:/") and old_zypper: cmd.extend(['-t', 'Plaindir']) @@ -162,6 +170,9 @@ def add_repo(module, repo, alias, description, disable_gpg_check, old_zypper): if disable_gpg_check and not old_zypper: cmd.append('--no-gpgcheck') + if refresh: + cmd.append('--refresh') + cmd.append(repo) if not repo.endswith('.repo'): @@ -216,6 +227,7 @@ def main(): state=dict(choices=['present', 'absent'], default='present'), description=dict(required=False), disable_gpg_check = dict(required=False, default='no', type='bool'), + refresh = dict(required=False, default='yes', type='bool'), ), supports_check_mode=False, ) @@ -225,6 +237,7 @@ def main(): name = module.params['name'] description = module.params['description'] disable_gpg_check = module.params['disable_gpg_check'] + refresh = module.params['refresh'] def exit_unchanged(): module.exit_json(changed=False, repo=repo, state=state, name=name) @@ -260,7 +273,7 @@ def main(): if exists: exit_unchanged() - changed = add_repo(module, repo, name, description, disable_gpg_check, old_zypper) + changed = add_repo(module, repo, name, description, disable_gpg_check, old_zypper, refresh) elif state == 'absent': if not exists: exit_unchanged() diff --git a/source_control/bzr.py b/source_control/bzr.py index 996150a39af..0d25a026f7a 100644 --- a/source_control/bzr.py +++ b/source_control/bzr.py @@ -45,11 +45,12 @@ options: bzr revno or revid. force: required: false - default: "yes" + default: "no" choices: [ 'yes', 'no' ] description: - If C(yes), any modified files in the working - tree will be discarded. + tree will be discarded. Before 1.9 the default + value was "yes". executable: required: false default: null @@ -145,7 +146,7 @@ def main(): dest=dict(required=True), name=dict(required=True, aliases=['parent']), version=dict(default='head'), - force=dict(default='yes', type='bool'), + force=dict(default='no', type='bool'), executable=dict(default=None), ) ) diff --git a/system/alternatives.py b/system/alternatives.py index 575cb572867..ff4de59cf11 100755 --- a/system/alternatives.py +++ b/system/alternatives.py @@ -4,6 +4,7 @@ """ Ansible module to manage symbolic link alternatives. 
(c) 2014, Gabe Mulley +(c) 2015, David Wittman This file is part of Ansible @@ -26,7 +27,7 @@ DOCUMENTATION = ''' module: alternatives short_description: Manages alternative programs for common commands description: - - Manages symbolic links using the 'update-alternatives' tool provided on debian-like systems. + - Manages symbolic links using the 'update-alternatives' tool - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). version_added: "1.6" options: @@ -41,6 +42,7 @@ options: link: description: - The path to the symbolic link that should point to the real executable. + - This option is required on RHEL-based distributions required: false requirements: [ update-alternatives ] ''' @@ -55,12 +57,14 @@ EXAMPLES = ''' DEFAULT_LINK_PRIORITY = 50 +import re + def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - path = dict(required=True), + path = dict(required=True), link = dict(required=False), ), supports_check_mode=True, @@ -71,71 +75,51 @@ def main(): path = params['path'] link = params['link'] - UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True) + UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True) current_path = None all_alternatives = [] - os_family = None - (rc, query_output, query_error) = module.run_command( - [UPDATE_ALTERNATIVES, '--query', name] + # Run `update-alternatives --display ` to find existing alternatives + (rc, display_output, _) = module.run_command( + [UPDATE_ALTERNATIVES, '--display', name] ) - # Gather the current setting and all alternatives from the query output. - # Query output should look something like this on Debian systems: - - # Name: java - # Link: /usr/bin/java - # Slaves: - # java.1.gz /usr/share/man/man1/java.1.gz - # Status: manual - # Best: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - # Value: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java - - # Alternative: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java - # Priority: 1061 - # Slaves: - # java.1.gz /usr/lib/jvm/java-6-openjdk-amd64/jre/man/man1/java.1.gz - - # Alternative: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - # Priority: 1071 - # Slaves: - # java.1.gz /usr/lib/jvm/java-7-openjdk-amd64/jre/man/man1/java.1.gz - if rc == 0: - os_family = "Debian" - for line in query_output.splitlines(): - split_line = line.split(':') - if len(split_line) == 2: - key = split_line[0] - value = split_line[1].strip() - if key == 'Value': - current_path = value - elif key == 'Alternative': - all_alternatives.append(value) - elif key == 'Link' and not link: - link = value - elif rc == 2: - os_family = "RedHat" - # This is the version of update-alternatives that is shipped with - # chkconfig on RedHat-based systems. Try again with the right options. 
-        (rc, query_output, query_error) = module.run_command(
-            [UPDATE_ALTERNATIVES, '--list']
-        )
-        for line in query_output.splitlines():
-            line_name, line_mode, line_path = line.strip().split("\t")
-            if line_name != name:
-                continue
-            current_path = line_path
-            break
+        # Alternatives already exist for this link group
+        # Parse the output to determine the current path of the symlink and
+        # available alternatives
+        current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
+                                        re.MULTILINE)
+        alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)
+
+        current_path = current_path_regex.search(display_output).group(1)
+        all_alternatives = alternative_regex.findall(display_output)
+
+        if not link:
+            # Read the current symlink target from `update-alternatives --query`
+            # in case we need to install the new alternative before setting it.
+            #
+            # This is only compatible on Debian-based systems, as the other
+            # alternatives don't have --query available
+            rc, query_output, _ = module.run_command(
+                [UPDATE_ALTERNATIVES, '--query', name]
+            )
+            if rc == 0:
+                for line in query_output.splitlines():
+                    if line.startswith('Link:'):
+                        link = line.split()[1]
+                        break
 
     if current_path != path:
         if module.check_mode:
             module.exit_json(changed=True, current_path=current_path)
 
         try:
             # install the requested path if necessary
-            # (unsupported on the RedHat version)
-            if path not in all_alternatives and os_family == "Debian":
+            if path not in all_alternatives:
+                if not link:
+                    module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
+
                 module.run_command(
                     [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)],
                     check_rc=True
@@ -148,7 +132,7 @@ def main():
             )
 
             module.exit_json(changed=True)
-        except subprocess.CalledProcessError, cpe:
+        except subprocess.CalledProcessError as cpe:
             module.fail_json(msg=str(dir(cpe)))
     else:
         module.exit_json(changed=False)
diff --git a/system/at.py b/system/at.py
index c63527563fd..770148991f1 100644
--- a/system/at.py
+++ b/system/at.py
@@ -78,7 +78,7 @@ import tempfile
 
 def add_job(module, result, at_cmd, count, units, command, script_file):
-    at_command = "%s now + %s %s -f %s" % (at_cmd, count, units, script_file)
+    at_command = "%s -f %s now + %s %s" % (at_cmd, script_file, count, units)
     rc, out, err = module.run_command(at_command, check_rc=True)
     if command:
         os.unlink(script_file)
diff --git a/system/cronvar.py b/system/cronvar.py
new file mode 100755
index 00000000000..23a626472c3
--- /dev/null
+++ b/system/cronvar.py
@@ -0,0 +1,430 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Cronvar Plugin: The goal of this plugin is to provide an idempotent
+# method for setting cron variable values. It should play well with the
+# existing cron module as well as allow for manually added variables.
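+# For example, applying "cronvar: name=MAILTO value=root" twice is a no-op the
+# second time: a change is only reported when the stored value differs from
+# the requested one.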
+# Each variable entered will be preceded with a comment describing the +# variable so that it can be found later. This is required to be +# present in order for this plugin to find/modify the variable +# +# This module is based on the crontab module. +# + +DOCUMENTATION = """ +--- +module: cronvar +short_description: Manage variables in crontabs +description: + - Use this module to manage crontab variables. This module allows + you to create, update, or delete cron variable definitions. +version_added: "2.0" +options: + name: + description: + - Name of the crontab variable. + default: null + required: true + value: + description: + - The value to set this variable to. Required if state=present. + required: false + default: null + insertafter: + required: false + default: null + description: + - Used with C(state=present). If specified, the variable will be inserted + after the variable specified. + insertbefore: + required: false + default: null + description: + - Used with C(state=present). If specified, the variable will be inserted + just before the variable specified. + state: + description: + - Whether to ensure that the variable is present or absent. + required: false + default: present + choices: [ "present", "absent" ] + user: + description: + - The specific user whose crontab should be modified. + required: false + default: root + cron_file: + description: + - If specified, uses this file in cron.d instead of an individual user's crontab. + required: false + default: null + backup: + description: + - If set, create a backup of the crontab before it is modified. + The location of the backup is returned in the C(backup) variable by this module. + required: false + default: false +requirements: + - cron +author: Doug Luce +""" + +EXAMPLES = ''' +# Ensure a variable exists. +# Creates an entry like "EMAIL=doug@ansibmod.con.com" +- cronvar: name="EMAIL" value="doug@ansibmod.con.com" + +# Make sure a variable is gone. This will remove any variable named +# "LEGACY" +- cronvar: name="LEGACY" state=absent + +# Adds a variable to a file under /etc/cron.d +- cronvar: name="LOGFILE" value="/var/log/yum-autoupdate.log" + user="root" cron_file=ansible_yum-autoupdate +''' + +import os +import re +import tempfile +import platform +import pipes +import shlex + +CRONCMD = "/usr/bin/crontab" + +class CronVarError(Exception): + pass + +class CronVar(object): + """ + CronVar object to write variables to crontabs. + + user - the user of the crontab (defaults to root) + cron_file - a cron file under /etc/cron.d + """ + def __init__(self, module, user=None, cron_file=None): + self.module = module + self.user = user + if self.user is None: + self.user = 'root' + self.lines = None + self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"', )) + # select whether we dump additional debug info through syslog + self.syslogging = False + + if cron_file: + self.cron_file = '/etc/cron.d/%s' % cron_file + else: + self.cron_file = None + + self.read() + + def read(self): + # Read in the crontab from the system + self.lines = [] + if self.cron_file: + # read the cronfile + try: + f = open(self.cron_file, 'r') + self.lines = f.read().splitlines() + f.close() + except IOError, e: + # cron file does not exist + return + except: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + else: + # using safely quoted shell for now, but this really should be two non-shell calls instead. 
FIXME + (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) + + if rc != 0 and rc != 1: # 1 can mean that there are no jobs. + raise CronVarError("Unable to read crontab") + + lines = out.splitlines() + count = 0 + for l in lines: + if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and + not re.match( r'# \(/tmp/.*installed on.*\)', l) and + not re.match( r'# \(.*version.*\)', l)): + self.lines.append(l) + count += 1 + + def log_message(self, message): + if self.syslogging: + syslog.syslog(syslog.LOG_NOTICE, 'ansible: "%s"' % message) + + def write(self, backup_file=None): + """ + Write the crontab to the system. Saves all information. + """ + if backup_file: + fileh = open(backup_file, 'w') + elif self.cron_file: + fileh = open(self.cron_file, 'w') + else: + filed, path = tempfile.mkstemp(prefix='crontab') + fileh = os.fdopen(filed, 'w') + + fileh.write(self.render()) + fileh.close() + + # return if making a backup + if backup_file: + return + + # Add the entire crontab back to the user crontab + if not self.cron_file: + # quoting shell args for now but really this should be two non-shell calls. FIXME + (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) + os.unlink(path) + + if rc != 0: + self.module.fail_json(msg=err) + + def remove_variable_file(self): + try: + os.unlink(self.cron_file) + return True + except OSError, e: + # cron file does not exist + return False + except: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + + def parse_for_var(self, line): + lexer = shlex.shlex(line) + lexer.wordchars = self.wordchars + varname = lexer.get_token() + is_env_var = lexer.get_token() == '=' + value = ''.join(lexer) + if is_env_var: + return (varname, value) + raise CronVarError("Not a variable.") + + def find_variable(self, name): + comment = None + for l in self.lines: + try: + (varname, value) = self.parse_for_var(l) + if varname == name: + return value + except CronVarError: + pass + return None + + def get_var_names(self): + var_names = [] + for l in self.lines: + try: + (var_name, _) = self.parse_for_var(l) + var_names.append(var_name) + except CronVarError: + pass + return var_names + + def add_variable(self, name, value, insertbefore, insertafter): + if insertbefore is None and insertafter is None: + # Add the variable to the top of the file. + self.lines.insert(0, "%s=%s" % (name, value)) + else: + newlines = [] + for l in self.lines: + try: + (varname, _) = self.parse_for_var(l) # Throws if not a var line + if varname == insertbefore: + newlines.append("%s=%s" % (name, value)) + newlines.append(l) + elif varname == insertafter: + newlines.append(l) + newlines.append("%s=%s" % (name, value)) + else: + raise CronVarError # Append. + except CronVarError: + newlines.append(l) + + self.lines = newlines + + def remove_variable(self, name): + self.update_variable(name, None, remove=True) + + def update_variable(self, name, value, remove=False): + newlines = [] + for l in self.lines: + try: + (varname, _) = self.parse_for_var(l) # Throws if not a var line + if varname != name: + raise CronVarError # Append. 
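+                # This line defines the target variable: write the new value,
+                # or (when remove=True) emit nothing so the line is dropped.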
+ if not remove: + newlines.append("%s=%s" % (name, value)) + except CronVarError: + newlines.append(l) + + self.lines = newlines + + def render(self): + """ + Render a proper crontab + """ + result = '\n'.join(self.lines) + if result and result[-1] not in ['\n', '\r']: + result += '\n' + return result + + def _read_user_execute(self): + """ + Returns the command line for reading a crontab + """ + user = '' + + if self.user: + if platform.system() == 'SunOS': + return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD)) + elif platform.system() == 'AIX': + return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user)) + elif platform.system() == 'HP-UX': + return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user)) + else: + user = '-u %s' % pipes.quote(self.user) + return "%s %s %s" % (CRONCMD , user, '-l') + + def _write_execute(self, path): + """ + Return the command line for writing a crontab + """ + user = '' + if self.user: + if platform.system() in ['SunOS', 'HP-UX', 'AIX']: + return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path)) + else: + user = '-u %s' % pipes.quote(self.user) + return "%s %s %s" % (CRONCMD , user, pipes.quote(path)) + +#================================================== + +def main(): + # The following example playbooks: + # + # - cronvar: name="SHELL" value="/bin/bash" + # + # - name: Set the email + # cronvar: name="EMAILTO" value="doug@ansibmod.con.com" + # + # - name: Get rid of the old new host variable + # cronvar: name="NEW_HOST" state=absent + # + # Would produce: + # SHELL = /bin/bash + # EMAILTO = doug@ansibmod.con.com + + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + value=dict(required=False), + user=dict(required=False), + cron_file=dict(required=False), + insertafter=dict(default=None), + insertbefore=dict(default=None), + state=dict(default='present', choices=['present', 'absent']), + backup=dict(default=False, type='bool'), + ), + mutually_exclusive=[['insertbefore', 'insertafter']], + supports_check_mode=False, + ) + + name = module.params['name'] + value = module.params['value'] + user = module.params['user'] + cron_file = module.params['cron_file'] + insertafter = module.params['insertafter'] + insertbefore = module.params['insertbefore'] + state = module.params['state'] + backup = module.params['backup'] + ensure_present = state == 'present' + + changed = False + res_args = dict() + + # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option. 
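+    # (A umask of 022 clears the group- and other-write bits, so new files
+    # are created 0644.)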
+    os.umask(022)
+    cronvar = CronVar(module, user, cron_file)
+
+    if cronvar.syslogging:
+        syslog.openlog('ansible-%s' % os.path.basename(__file__))
+        syslog.syslog(syslog.LOG_NOTICE, 'cronvar instantiated - name: "%s"' % name)
+
+    # --- user input validation ---
+
+    if name is None and ensure_present:
+        module.fail_json(msg="You must specify 'name' to insert a new cron variable")
+
+    if value is None and ensure_present:
+        module.fail_json(msg="You must specify 'value' to insert a new cron variable")
+
+    if name is None and not ensure_present:
+        module.fail_json(msg="You must specify 'name' to remove a cron variable")
+
+    # if requested make a backup before making a change
+    if backup:
+        (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
+        cronvar.write(backup_file)
+
+    if cronvar.cron_file and not name and not ensure_present:
+        changed = cronvar.remove_variable_file()
+        module.exit_json(changed=changed, cron_file=cron_file, state=state)
+
+    old_value = cronvar.find_variable(name)
+
+    if ensure_present:
+        if old_value is None:
+            cronvar.add_variable(name, value, insertbefore, insertafter)
+            changed = True
+        elif old_value != value:
+            cronvar.update_variable(name, value)
+            changed = True
+    else:
+        if old_value is not None:
+            cronvar.remove_variable(name)
+            changed = True
+
+    res_args = {
+        "vars": cronvar.get_var_names(),
+        "changed": changed
+    }
+
+    if changed:
+        cronvar.write()
+
+    # retain the backup only if crontab or cron file have changed
+    if backup:
+        if changed:
+            res_args['backup_file'] = backup_file
+        else:
+            os.unlink(backup_file)
+
+    if cron_file:
+        res_args['cron_file'] = cron_file
+
+    module.exit_json(**res_args)
+
+    # --- should never get here
+    module.exit_json(msg="Unable to execute cronvar task.")
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/system/crypttab.py b/system/crypttab.py
index 70230fa53e1..ccd4102c66b 100644
--- a/system/crypttab.py
+++ b/system/crypttab.py
@@ -24,7 +24,7 @@ module: crypttab
 short_description: Encrypted Linux block devices
 description:
   - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
-version_added: "1.8"
+version_added: "1.9"
 options:
   name:
     description:
@@ -103,7 +103,7 @@ def main():
     state = module.params['state']
     path = module.params['path']
 
-    if backing_device is None and password is None and opts is None:
+    if state != 'absent' and backing_device is None and password is None and opts is None:
         module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
                          **module.params)
 
diff --git a/system/debconf.py b/system/debconf.py
index 7f5ea0368ca..0deaff25eb1 100644
--- a/system/debconf.py
+++ b/system/debconf.py
@@ -34,6 +34,7 @@ notes:
     - A number of questions have to be answered (depending on the package).
      Use 'debconf-show <package>' on any Debian or derivative with the package
      installed to see questions/settings available.
+    - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements: [ debconf, debconf-utils ] options: name: @@ -85,8 +86,6 @@ debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license- debconf: name='tzdata' ''' -import pipes - def get_selections(module, pkg): cmd = [module.get_bin_path('debconf-show', True), pkg] rc, out, err = module.run_command(' '.join(cmd)) @@ -105,14 +104,14 @@ def get_selections(module, pkg): def set_selection(module, pkg, question, vtype, value, unseen): - data = ' '.join([ question, vtype, value ]) - setsel = module.get_bin_path('debconf-set-selections', True) - cmd = ["echo %s %s |" % (pipes.quote(pkg), pipes.quote(data)), setsel] + cmd = [setsel] if unseen: cmd.append('-u') - return module.run_command(' '.join(cmd), use_unsafe_shell=True) + data = ' '.join([pkg, question, vtype, value]) + + return module.run_command(cmd, data=data) def main(): @@ -136,7 +135,6 @@ def main(): unseen = module.params["unseen"] prev = get_selections(module, pkg) - diff = '' changed = False msg = "" diff --git a/system/facter.py b/system/facter.py index a72cdc6536f..a4912835447 100644 --- a/system/facter.py +++ b/system/facter.py @@ -45,7 +45,7 @@ def main(): argument_spec = dict() ) - cmd = ["/usr/bin/env", "facter", "--json"] + cmd = ["/usr/bin/env", "facter", "--puppet", "--json"] rc, out, err = module.run_command(cmd, check_rc=True) module.exit_json(**json.loads(out)) diff --git a/system/firewalld.py b/system/firewalld.py index cf90c5ace56..0348c6ecb47 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -23,22 +23,22 @@ DOCUMENTATION = ''' module: firewalld short_description: Manage arbitrary ports/services with firewalld description: - - This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules + - This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules. version_added: "1.4" options: service: description: - - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services" + - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services." required: false default: null port: description: - - "Name of a port to add/remove to/from firewalld must be in the form PORT/PROTOCOL" + - "Name of a port or port range to add/remove to/from firewalld. Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges." required: false default: null rich_rule: description: - - "Rich rule to add/remove to/from firewalld" + - "Rich rule to add/remove to/from firewalld." required: false default: null source: @@ -54,7 +54,7 @@ options: choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block"] permanent: description: - - "Should this configuration be in the running firewalld configuration or persist across reboots" + - "Should this configuration be in the running firewalld configuration or persist across reboots." required: true immediate: description: @@ -64,15 +64,15 @@ options: version_added: "1.9" state: description: - - "Should this port accept(enabled) or reject(disabled) connections" + - "Should this port accept(enabled) or reject(disabled) connections." required: true timeout: description: - - "The amount of time the rule should be in effect for when non-permanent" + - "The amount of time the rule should be in effect for when non-permanent." required: false default: 0 notes: - - Not tested on any debian based system + - Not tested on any debian based system. 
requirements: [ firewalld >= 0.2.11 ] author: Adam Miller ''' @@ -80,6 +80,7 @@ author: Adam Miller EXAMPLES = ''' - firewalld: service=https permanent=true state=enabled - firewalld: port=8081/tcp permanent=true state=disabled +- firewalld: port=161-162/udp permanent=true state=enabled - firewalld: zone=dmz service=http permanent=true state=enabled - firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled - firewalld: source='192.168.1.0/24' zone=internal state=enabled diff --git a/system/getent.py b/system/getent.py index 7da1be45fae..bb6d162398c 100644 --- a/system/getent.py +++ b/system/getent.py @@ -86,7 +86,7 @@ def main(): database = dict(required=True), key = dict(required=False, default=None), split = dict(required=False, default=None), - fail_key = dict(required=False, default=True), + fail_key = dict(required=False, type='bool', default=True), ), supports_check_mode = True, ) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 8709cf778b1..2a8bc74df72 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -21,9 +21,10 @@ DOCUMENTATION = """ module: gluster_volume -short_description: Manage GlusterFs volumes +short_description: Manage GlusterFS volumes description: - Create, remove, start, stop and tune GlusterFS volumes +version_added: "1.9" options: name: required: true @@ -33,33 +34,40 @@ options: required: true choices: [ 'present', 'absent', 'started', 'stopped' ] description: - - Mode of operation: present/absent ensure if a module exists or not. - started/stopped make sure it is enabled or not. + - Use present/absent ensure if a volume exists or not, + use started/stopped to control it's availability. cluster: required: false + default: null description: - List of hosts to use for probing and brick setup host: required: false + default: null description: - Override local hostname (for peer probing purposes) replicas: required: false + default: null description: - Replica count for volume stripes: required: false + default: null description: - Stripe count for volume transport: required: false choices: [ 'tcp', 'rdma', 'tcp,rdma' ] + default: 'tcp' description: - Transport type for volume - brick: + bricks: required: false + default: null description: - - Brick path on servers + - Brick paths on servers. Multiple brick paths can be separated by commas + aliases: ['brick'] start_on_create: choices: [ 'yes', 'no'] required: false @@ -68,20 +76,30 @@ options: rebalance: choices: [ 'yes', 'no'] required: false + default: 'no' description: - Controls whether the cluster is rebalanced after changes directory: required: false + default: null description: - Directory for limit-usage options: required: false + default: null description: - A dictionary/hash with options/settings for the volume quota: required: false + default: null description: - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list) + force: + required: false + default: null + description: + - If brick is being created in the root partition, module will fail. 
+ Set force to true to override this behaviour notes: - "Requires cli tools for GlusterFS on servers" - "Will add new bricks, but not remove them" @@ -90,7 +108,7 @@ author: Taneli Leppä EXAMPLES = """ - name: create gluster volume - gluster_volume: state=present name=test1 brick=/bricks/brick1/g1 rebalance=yes hosts:"{{ play_hosts }}" + gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster:"{{ play_hosts }}" run_once: true - name: tune @@ -107,168 +125,189 @@ EXAMPLES = """ - name: remove gluster volume gluster_volume: state=absent name=test1 + +- name: create gluster volume with multiple bricks + gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster:"{{ play_hosts }}" + run_once: true """ import shutil import time import socket -def main(): +glusterbin = '' - - def run_gluster(gargs, **kwargs): - args = [glusterbin] - args.extend(gargs) +def run_gluster(gargs, **kwargs): + global glusterbin + global module + args = [glusterbin] + args.extend(gargs) + try: rc, out, err = module.run_command(args, **kwargs) if rc != 0: module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) - return out - - def run_gluster_nofail(gargs, **kwargs): - args = [glusterbin] - args.extend(gargs) - rc, out, err = module.run_command(args, **kwargs) - if rc != 0: - return None - return out - - def run_gluster_yes(gargs): - args = [glusterbin] - args.extend(gargs) - rc, out, err = module.run_command(args, data='y\n') - if rc != 0: - module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) - return out - - def get_peers(): - out = run_gluster([ 'peer', 'status']) - i = 0 - peers = {} - hostname = None - uuid = None - state = None - for row in out.split('\n'): - if ': ' in row: - key, value = row.split(': ') - if key.lower() == 'hostname': - hostname = value - if key.lower() == 'uuid': - uuid = value - if key.lower() == 'state': - state = value - peers[hostname] = [ uuid, state ] - return peers - - def get_volumes(): - out = run_gluster([ 'volume', 'info' ]) - - volumes = {} - volume = {} - for row in out.split('\n'): - if ': ' in row: - key, value = row.split(': ') - if key.lower() == 'volume name': - volume['name'] = value + except Exception, e: + module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e))) + return out + +def run_gluster_nofail(gargs, **kwargs): + global glusterbin + global module + args = [glusterbin] + args.extend(gargs) + rc, out, err = module.run_command(args, **kwargs) + if rc != 0: + return None + return out + +def run_gluster_yes(gargs): + global glusterbin + global module + args = [glusterbin] + args.extend(gargs) + rc, out, err = module.run_command(args, data='y\n') + if rc != 0: + module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) + return out + +def get_peers(): + out = run_gluster([ 'peer', 'status']) + i = 0 + peers = {} + hostname = None + uuid = None + state = None + for row in out.split('\n'): + if ': ' in row: + key, value = row.split(': ') + if key.lower() == 'hostname': + hostname = value + if key.lower() == 'uuid': + uuid = value + if key.lower() == 'state': + state = value + peers[hostname] = [ uuid, state ] + return peers + +def get_volumes(): + out = run_gluster([ 'volume', 'info' ]) + + volumes = {} + volume = {} + for row in out.split('\n'): + if ': ' in row: + key, value = 
row.split(': ') + if key.lower() == 'volume name': + volume['name'] = value + volume['options'] = {} + volume['quota'] = False + if key.lower() == 'volume id': + volume['id'] = value + if key.lower() == 'status': + volume['status'] = value + if key.lower() == 'transport-type': + volume['transport'] = value + if key.lower() != 'bricks' and key.lower()[:5] == 'brick': + if not 'bricks' in volume: + volume['bricks'] = [] + volume['bricks'].append(value) + # Volume options + if '.' in key: + if not 'options' in volume: volume['options'] = {} - volume['quota'] = False - if key.lower() == 'volume id': - volume['id'] = value - if key.lower() == 'status': - volume['status'] = value - if key.lower() == 'transport-type': - volume['transport'] = value - if key.lower() != 'bricks' and key.lower()[:5] == 'brick': - if not 'bricks' in volume: - volume['bricks'] = [] - volume['bricks'].append(value) - # Volume options - if '.' in key: - if not 'options' in volume: - volume['options'] = {} - volume['options'][key] = value - if key == 'features.quota' and value == 'on': - volume['quota'] = True - else: - if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:': - if len(volume) > 0: - volumes[volume['name']] = volume - volume = {} - return volumes - - def get_quotas(name, nofail): - quotas = {} - if nofail: - out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ]) - if not out: - return quotas + volume['options'][key] = value + if key == 'features.quota' and value == 'on': + volume['quota'] = True else: - out = run_gluster([ 'volume', 'quota', name, 'list' ]) - for row in out.split('\n'): - if row[:1] == '/': - q = re.split('\s+', row) - quotas[q[0]] = q[1] - return quotas - - def wait_for_peer(host): - for x in range(0, 4): - peers = get_peers() - if host in peers and peers[host][1].lower().find('peer in cluster') != -1: - return True - time.sleep(1) - return False - - def probe(host): - run_gluster([ 'peer', 'probe', host ]) - if not wait_for_peer(host): - module.fail_json(msg='failed to probe peer %s' % host) - changed = True - - def probe_all_peers(hosts, peers, myhostname): - for host in hosts: - if host not in peers: - # dont probe ourselves - if myhostname != host: - probe(host) - - def create_volume(name, stripe, replica, transport, hosts, brick): - args = [ 'volume', 'create' ] - args.append(name) - if stripe: - args.append('stripe') - args.append(str(stripe)) - if replica: - args.append('replica') - args.append(str(replica)) - args.append('transport') - args.append(transport) + if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:': + if len(volume) > 0: + volumes[volume['name']] = volume + volume = {} + return volumes + +def get_quotas(name, nofail): + quotas = {} + if nofail: + out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ]) + if not out: + return quotas + else: + out = run_gluster([ 'volume', 'quota', name, 'list' ]) + for row in out.split('\n'): + if row[:1] == '/': + q = re.split('\s+', row) + quotas[q[0]] = q[1] + return quotas + +def wait_for_peer(host): + for x in range(0, 4): + peers = get_peers() + if host in peers and peers[host][1].lower().find('peer in cluster') != -1: + return True + time.sleep(1) + return False + +def probe(host): + global module + run_gluster([ 'peer', 'probe', host ]) + if not wait_for_peer(host): + module.fail_json(msg='failed to probe peer %s' % host) + changed = True + +def probe_all_peers(hosts, peers, myhostname): + for host in hosts: + if host not in peers: + # dont probe ourselves + if myhostname != host: + 
probe(host)
+
+def create_volume(name, stripe, replica, transport, hosts, bricks, force):
+    args = [ 'volume', 'create' ]
+    args.append(name)
+    if stripe:
+        args.append('stripe')
+        args.append(str(stripe))
+    if replica:
+        args.append('replica')
+        args.append(str(replica))
+    args.append('transport')
+    args.append(transport)
+    for brick in bricks:
         for host in hosts:
             args.append(('%s:%s' % (host, brick)))
-        run_gluster(args)
+    if force:
+        args.append('force')
+    run_gluster(args)
 
-    def start_volume(name):
-        run_gluster([ 'volume', 'start', name ])
+def start_volume(name):
+    run_gluster([ 'volume', 'start', name ])
 
-    def stop_volume(name):
-        run_gluster_yes([ 'volume', 'stop', name ])
+def stop_volume(name):
+    run_gluster_yes([ 'volume', 'stop', name ])
 
-    def set_volume_option(name, option, parameter):
-        run_gluster([ 'volume', 'set', name, option, parameter ])
+def set_volume_option(name, option, parameter):
+    run_gluster([ 'volume', 'set', name, option, parameter ])
 
-    def add_brick(name, brick):
-        run_gluster([ 'volume', 'add-brick', name, brick ])
+def add_brick(name, brick, force):
+    args = [ 'volume', 'add-brick', name, brick ]
+    if force:
+        args.append('force')
+    run_gluster(args)
 
-    def rebalance(name):
-        run_gluster(['volume', 'rebalance', name, 'start'])
+def do_rebalance(name):
+    run_gluster([ 'volume', 'rebalance', name, 'start' ])
 
-    def enable_quota(name):
-        run_gluster([ 'volume', 'quota', name, 'enable' ])
+def enable_quota(name):
+    run_gluster([ 'volume', 'quota', name, 'enable' ])
 
-    def set_quota(name, directory, value):
-        run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ])
+def set_quota(name, directory, value):
+    run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ])
 
+def main():
     ### MAIN ###
 
+    global module
     module = AnsibleModule(
         argument_spec=dict(
             name=dict(required=True, default=None, aliases=['volume']),
@@ -278,15 +317,17 @@ def main():
             stripes=dict(required=False, default=None, type='int'),
             replicas=dict(required=False, default=None, type='int'),
             transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]),
-            brick=dict(required=False, default=None),
+            bricks=dict(required=False, default=None, aliases=['brick']),
             start_on_create=dict(required=False, default=True, type='bool'),
-            rebalance=dict(required=False, default=False, taype='bool'),
-            options=dict(required=False, default=None, type='dict'),
+            rebalance=dict(required=False, default=False, type='bool'),
+            options=dict(required=False, default={}, type='dict'),
             quota=dict(required=False),
             directory=dict(required=False, default=None),
+            force=dict(required=False, default=False, type='bool'),
         )
     )
 
+    global glusterbin
     glusterbin = module.get_bin_path('gluster', True)
 
     changed = False
@@ -294,17 +335,23 @@ def main():
     action = module.params['state']
     volume_name = module.params['name']
     cluster= module.params['cluster']
-    brick_path = module.params['brick']
+    brick_paths = module.params['bricks']
     stripes = module.params['stripes']
     replicas = module.params['replicas']
     transport = module.params['transport']
     myhostname = module.params['host']
-    start_volume = module.boolean(module.params['start_on_create'])
+    start_on_create = module.boolean(module.params['start_on_create'])
     rebalance = module.boolean(module.params['rebalance'])
+    force = module.boolean(module.params['force'])
 
     if not myhostname:
         myhostname = socket.gethostname()
 
+    if brick_paths is not None and "," in brick_paths:
+        brick_paths = brick_paths.split(",")
+    else:
+        brick_paths = [brick_paths]
+
     options = module.params['options']
     quota = module.params['quota']
     directory = module.params['directory']
@@ -320,7 +367,9 @@ def main():
 
     # do the work!
     if action == 'absent':
         if volume_name in volumes:
-            run_gluster([ 'volume', 'delete', name ])
+            if volumes[volume_name]['status'].lower() != 'stopped':
+                stop_volume(volume_name)
+            run_gluster_yes([ 'volume', 'delete', volume_name ])
             changed = True
 
     if action == 'present':
@@ -328,11 +377,12 @@ def main():
 
         # create if it doesn't exist
         if volume_name not in volumes:
-            create_volume(volume_name, stripes, replicas, transport, cluster, brick_path)
+            create_volume(volume_name, stripes, replicas, transport, cluster, brick_paths, force)
+            volumes = get_volumes()
             changed = True
 
         if volume_name in volumes:
-            if volumes[volume_name]['status'].lower() != 'started' and start_volume:
+            if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
                 start_volume(volume_name)
                 changed = True
 
@@ -341,10 +391,11 @@ def main():
             removed_bricks = []
             all_bricks = []
             for node in cluster:
-                brick = '%s:%s' % (node, brick_path)
-                all_bricks.append(brick)
-                if brick not in volumes[volume_name]['bricks']:
-                    new_bricks.append(brick)
+                for brick_path in brick_paths:
+                    brick = '%s:%s' % (node, brick_path)
+                    all_bricks.append(brick)
+                    if brick not in volumes[volume_name]['bricks']:
+                        new_bricks.append(brick)
 
             # this module does not yet remove bricks, but we check those anyways
             for brick in volumes[volume_name]['bricks']:
@@ -352,7 +403,7 @@ def main():
                     removed_bricks.append(brick)
 
             for brick in new_bricks:
-                add_brick(volume_name, brick)
+                add_brick(volume_name, brick, force)
                 changed = True
 
         # handle quotas
@@ -389,7 +440,7 @@ def main():
     if changed:
         volumes = get_volumes()
         if rebalance:
-            rebalance(volume_name)
+            do_rebalance(volume_name)
 
     facts = {}
     facts['glusterfs'] = { 'peers': peers, 'volumes': volumes, 'quotas': quotas }
 
diff --git a/system/known_hosts.py b/system/known_hosts.py
new file mode 100644
index 00000000000..86876cd4931
--- /dev/null
+++ b/system/known_hosts.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+
+"""
+Ansible module to manage the ssh known_hosts file.
+Copyright(c) 2014, Matthew Vernon
+
+This module is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This module is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this module. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: known_hosts
+short_description: Add or remove a host from the C(known_hosts) file
+description:
+   - The M(known_hosts) module lets you add or remove a host from the C(known_hosts) file.
+     This is useful if you're going to want to use the M(git) module over ssh, for example.
+     If you have a very large number of host keys to manage, you will find the M(template) module more useful.
+version_added: "1.9" +options: + name: + aliases: [ 'host' ] + description: + - The host to add or remove (must match a host specified in key) + required: true + default: null + key: + description: + - The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed) + required: false + default: null + path: + description: + - The known_hosts file to edit + required: no + default: "(homedir)+/.ssh/known_hosts" + state: + description: + - I(present) to add the host, I(absent) to remove it. + choices: [ "present", "absent" ] + required: no + default: present +requirements: [ ] +author: Matthew Vernon +''' + +EXAMPLES = ''' +# Example using with_file to set the system known_hosts file +- name: tell the host about our servers it might want to ssh to + known_hosts: path='/etc/ssh/ssh_known_hosts' + host='foo.com.invalid' + key="{{ lookup('file', 'pubkeys/foo.com.invalid') }}" +''' + +# Makes sure public host keys are present or absent in the given known_hosts +# file. +# +# Arguments +# ========= +# name = hostname whose key should be added (alias: host) +# key = line(s) to add to known_hosts file +# path = the known_hosts file to edit (default: ~/.ssh/known_hosts) +# state = absent|present (default: present) + +import os +import os.path +import tempfile +import errno + +def enforce_state(module, params): + """ + Add or remove key. + """ + + host = params["name"] + key = params.get("key",None) + port = params.get("port",None) + #expand the path parameter; otherwise module.add_path_info + #(called by exit_json) unhelpfully says the unexpanded path is absent. + path = os.path.expanduser(params.get("path")) + state = params.get("state") + #Find the ssh-keygen binary + sshkeygen = module.get_bin_path("ssh-keygen",True) + + #trailing newline in files gets lost, so re-add if necessary + if key is not None and key[-1]!='\n': + key+='\n' + + if key is None and state != "absent": + module.fail_json(msg="No key specified when adding a host") + + sanity_check(module,host,key,sshkeygen) + + current,replace=search_for_host_key(module,host,key,path,sshkeygen) + + #We will change state if current==True & state!="present" + #or current==False & state=="present" + #i.e (current) XOR (state=="present") + #Alternatively, if replace is true (i.e. key present, and we must change it) + if module.check_mode: + module.exit_json(changed = replace or ((state=="present") != current)) + + #Now do the work. 
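+    #An illustrative walkthrough of how the two conditionals below combine
+    #(example cases only, the logic itself is unchanged):
+    #  current=False, state="present"   -> new entry is appended
+    #  current=True,  state="absent"    -> existing entry is removed
+    #  replace=True (key differs)       -> old entry removed, new appended
+    #  otherwise                        -> nothing to do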
+ + #First, remove an extant entry if required + if replace==True or (current==True and state=="absent"): + module.run_command([sshkeygen,'-R',host,'-f',path], + check_rc=True) + params['changed'] = True + #Next, add a new (or replacing) entry + if replace==True or (current==False and state=="present"): + try: + inf=open(path,"r") + except IOError, e: + if e.errno == errno.ENOENT: + inf=None + else: + module.fail_json(msg="Failed to read %s: %s" % \ + (path,str(e))) + try: + outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path)) + if inf is not None: + for line in inf: + outf.write(line) + inf.close() + outf.write(key) + outf.flush() + module.atomic_move(outf.name,path) + except (IOError,OSError),e: + module.fail_json(msg="Failed to write to file %s: %s" % \ + (path,str(e))) + + try: + outf.close() + except: + pass + + params['changed'] = True + + return params + +def sanity_check(module,host,key,sshkeygen): + '''Check supplied key is sensible + + host and key are parameters provided by the user; If the host + provided is inconsistent with the key supplied, then this function + quits, providing an error to the user. + sshkeygen is the path to ssh-keygen, found earlier with get_bin_path + ''' + #If no key supplied, we're doing a removal, and have nothing to check here. + if key is None: + return + #Rather than parsing the key ourselves, get ssh-keygen to do it + #(this is essential for hashed keys, but otherwise useful, as the + #key question is whether ssh-keygen thinks the key matches the host). + + #The approach is to write the key to a temporary file, + #and then attempt to look up the specified host in that file. + try: + outf=tempfile.NamedTemporaryFile() + outf.write(key) + outf.flush() + except IOError,e: + module.fail_json(msg="Failed to write to temporary file %s: %s" % \ + (outf.name,str(e))) + rc,stdout,stderr=module.run_command([sshkeygen,'-F',host, + '-f',outf.name], + check_rc=True) + try: + outf.close() + except: + pass + + if stdout=='': #host not found + module.fail_json(msg="Host parameter does not match hashed host field in supplied key") + +def search_for_host_key(module,host,key,path,sshkeygen): + '''search_for_host_key(module,host,key,path,sshkeygen) -> (current,replace) + + Looks up host in the known_hosts file path; if it's there, looks to see + if one of those entries matches key. Returns: + current (Boolean): is host found in path? + replace (Boolean): is the key in path different to that supplied by user? + if current=False, then replace is always False. + sshkeygen is the path to ssh-keygen, found earlier with get_bin_path + ''' + replace=False + if os.path.exists(path)==False: + return False, False + #openssh >=6.4 has changed ssh-keygen behaviour such that it returns + #1 if no host is found, whereas previously it returned 0 + rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,'-f',path], + check_rc=False) + if stdout=='' and stderr=='' and (rc==0 or rc==1): + return False, False #host not found, no other errors + if rc!=0: #something went wrong + module.fail_json(msg="ssh-keygen failed (rc=%d,stdout='%s',stderr='%s')" % (rc,stdout,stderr)) + +#If user supplied no key, we don't want to try and replace anything with it + if key is None: + return True, False + + lines=stdout.split('\n') + k=key.strip() #trim trailing newline + #ssh-keygen returns only the host we ask about in the host field, + #even if the key entry has multiple hosts. Emulate this behaviour here, + #otherwise we get false negatives. + #Only necessary for unhashed entries. 
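+    #For example, a stored line "foo.example,bar.example ssh-rsa AAAA..."
+    #is reported by ssh-keygen -F foo.example with only "foo.example" in
+    #the host field, so the supplied key's host field is rewritten below
+    #before the exact string comparison. (Hostnames here are illustrative.)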
+    if k[0] != '|':
+        k=k.split()
+        #The optional "marker" field, used for @cert-authority or @revoked
+        if k[0][0] == '@':
+            k[1]=host
+        else:
+            k[0]=host
+        k=' '.join(k)
+
+    for l in lines:
+        if l=='':
+            continue
+        if l[0]=='#': #comment
+            continue
+        if k==l: #found a match
+            return True, False #current, not-replace
+
+    #No match found, return current and replace
+    return True, True
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec = dict(
+            name      = dict(required=True,  type='str', aliases=['host']),
+            key       = dict(required=False, type='str'),
+            path      = dict(default="~/.ssh/known_hosts", type='str'),
+            state     = dict(default='present', choices=['absent','present']),
+            ),
+        supports_check_mode = True
+        )
+
+    results = enforce_state(module,module.params)
+    module.exit_json(**results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/system/locale_gen.py b/system/locale_gen.py
index 9ff0a87f36a..c5943cd63a0 100644
--- a/system/locale_gen.py
+++ b/system/locale_gen.py
@@ -4,6 +4,7 @@
 import os
 import os.path
 from subprocess import Popen, PIPE, call
+import re
 
 DOCUMENTATION = '''
 ---
@@ -32,10 +33,35 @@ EXAMPLES = '''
 - locale_gen: name=de_CH.UTF-8 state=present
 '''
 
+LOCALE_NORMALIZATION = {
+    ".utf8": ".UTF-8",
+    ".eucjp": ".EUC-JP",
+}
+
 # ===========================================
 # location module specific support methods.
 #
 
+def is_available(name, ubuntuMode):
+    """Check if the given locale is available on the system. This is done by
+    checking either:
+    * if the locale is present in /etc/locale.gen
+    * or if the locale is present in /usr/share/i18n/SUPPORTED"""
+    if ubuntuMode:
+        __regexp = '^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+        __locales_available = '/usr/share/i18n/SUPPORTED'
+    else:
+        __regexp = '^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+        __locales_available = '/etc/locale.gen'
+
+    re_compiled = re.compile(__regexp)
+    with open(__locales_available, 'r') as fd:
+        for line in fd:
+            result = re_compiled.match(line)
+            if result and result.group('locale') == name:
+                return True
+    return False
+
 def is_present(name):
     """Checks if the given locale is currently installed."""
     output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
@@ -44,7 +70,9 @@ def is_present(name):
 def fix_case(name):
     """locale -a might return the encoding in either lower or upper case.
     Passing through this function makes them uniform for comparisons."""
-    return name.replace(".utf8", ".UTF-8")
+    for s, r in LOCALE_NORMALIZATION.iteritems():
+        name = name.replace(s, r)
+    return name
 
 def replace_line(existing_line, new_line):
     """Replaces lines in /etc/locale.gen"""
@@ -53,32 +81,42 @@ def replace_line(existing_line, new_line):
     with open("/etc/locale.gen", "w") as f:
         f.write("".join(lines))
 
-def apply_change(targetState, name, encoding):
+def set_locale(name, enabled=True):
+    """ Sets the state of the locale. Defaults to enabled. """
+    search_string = '#{0,1}\s*%s (?P<charset>.+)' % name
+    if enabled:
+        new_string = '%s \g<charset>' % (name)
+    else:
+        new_string = '# %s \g<charset>' % (name)
+    with open("/etc/locale.gen", "r") as f:
+        lines = [re.sub(search_string, new_string, line) for line in f]
+    with open("/etc/locale.gen", "w") as f:
+        f.write("".join(lines))
+
+def apply_change(targetState, name):
     """Create or remove locale.
 
     Keyword arguments:
     targetState -- Desired state, either present or absent.
     name -- Name including encoding such as de_CH.UTF-8.
-    encoding -- Encoding such as UTF-8.
     """
     if targetState=="present":
         # Create locale.
- replace_line("# "+name+" "+encoding, name+" "+encoding) + set_locale(name, enabled=True) else: # Delete locale. - replace_line(name+" "+encoding, "# "+name+" "+encoding) + set_locale(name, enabled=False) localeGenExitValue = call("locale-gen") if localeGenExitValue!=0: raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue)) -def apply_change_ubuntu(targetState, name, encoding): +def apply_change_ubuntu(targetState, name): """Create or remove locale. Keyword arguments: targetState -- Desired state, either present or absent. name -- Name including encoding such as de_CH.UTF-8. - encoding -- Encoding such as UTF-8. """ if targetState=="present": # Create locale. @@ -90,7 +128,8 @@ def apply_change_ubuntu(targetState, name, encoding): content = f.readlines() with open("/var/lib/locales/supported.d/local", "w") as f: for line in content: - if line!=(name+" "+encoding+"\n"): + locale, charset = line.split(' ') + if locale != name: f.write(line) # Purge locales and regenerate. # Please provide a patch if you know how to avoid regenerating the locales to keep! @@ -107,14 +146,12 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(choices=['present','absent'], required=True), + state = dict(choices=['present','absent'], default='present'), ), supports_check_mode=True ) name = module.params['name'] - if not "." in name: - module.fail_json(msg="Locale does not match pattern. Did you specify the encoding?") state = module.params['state'] if not os.path.exists("/etc/locale.gen"): @@ -122,27 +159,30 @@ def main(): # Ubuntu created its own system to manage locales. ubuntuMode = True else: - module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package “locales” installed?") + module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?") else: # We found the common way to manage locales. 
         ubuntuMode = False
-
+
+    if not is_available(name, ubuntuMode):
+        module.fail_json(msg="The locale you've entered is not available "
+                             "on your system.")
+
     prev_state = "present" if is_present(name) else "absent"
     changed = (prev_state!=state)
 
     if module.check_mode:
         module.exit_json(changed=changed)
     else:
-        encoding = name.split(".")[1]
         if changed:
             try:
                 if ubuntuMode==False:
-                    apply_change(state, name, encoding)
+                    apply_change(state, name)
                 else:
-                    apply_change_ubuntu(state, name, encoding)
+                    apply_change_ubuntu(state, name)
             except EnvironmentError as e:
                 module.fail_json(msg=e.strerror, exitValue=e.errno)
-
+
     module.exit_json(name=name, changed=changed, msg="OK")
 
 # import module snippets
diff --git a/system/lvg.py b/system/lvg.py
index e568e9df677..295ee24e3c6 100644
--- a/system/lvg.py
+++ b/system/lvg.py
@@ -135,7 +135,9 @@ def main():
         elif state == 'present':
             module.fail_json(msg="No physical volumes given.")
 
-
+    # LVM always uses real paths, not symlinks, so replace symlinks with the actual path
+    for idx, dev in enumerate(dev_list):
+        dev_list[idx] = os.path.realpath(dev)
 
     if state=='present':
         ### check given devices
diff --git a/system/lvol.py b/system/lvol.py
index 96f1b846e27..d9be9e7dc70 100644
--- a/system/lvol.py
+++ b/system/lvol.py
@@ -132,8 +132,8 @@ def main():
 
     # LVCREATE(8) -L --size option unit
     elif size[-1].isalpha():
-        if size[-1] in 'bBsSkKmMgGtTpPeE':
-            size_unit = size[-1]
+        if size[-1].lower() in 'bskmgtpe':
+            size_unit = size[-1].lower()
             if size[0:-1].isdigit():
                 size = int(size[0:-1])
             else:
@@ -152,8 +152,9 @@ def main():
     else:
         unit = size_unit
 
+    lvs_cmd = module.get_bin_path("lvs", required=True)
     rc, current_lvs, err = module.run_command(
-        "lvs --noheadings -o lv_name,size --units %s --separator ';' %s" % (unit, vg))
+        "%s --noheadings --nosuffix -o lv_name,size --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
 
     if rc != 0:
         if state == 'absent':
@@ -185,7 +186,8 @@ def main():
         if module.check_mode:
             changed = True
         else:
-            rc, _, err = module.run_command("lvcreate -n %s -%s %s%s %s" % (lv, size_opt, size, size_unit, vg))
+            lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
+            rc, _, err = module.run_command("%s -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg))
             if rc == 0:
                 changed = True
             else:
@@ -197,7 +199,8 @@ def main():
             module.exit_json(changed=True)
         if not force:
             module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
-        rc, _, err = module.run_command("lvremove --force %s/%s" % (vg, this_lv['name']))
+        lvremove_cmd = module.get_bin_path("lvremove", required=True)
+        rc, _, err = module.run_command("%s --force %s/%s" % (lvremove_cmd, vg, this_lv['name']))
         if rc == 0:
             module.exit_json(changed=True)
         else:
@@ -209,11 +212,12 @@ def main():
         ### resize LV
         tool = None
         if size > this_lv['size']:
-            tool = 'lvextend'
+            tool = module.get_bin_path("lvextend", required=True)
         elif size < this_lv['size']:
             if not force:
                 module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
-            tool = 'lvreduce --force'
+            tool = module.get_bin_path("lvreduce", required=True)
+            tool += ' --force'
 
         if tool:
             if module.check_mode:
diff --git a/system/modprobe.py b/system/modprobe.py
index 50c8f72fb2a..af845ae8cf5 100644
--- a/system/modprobe.py
+++ b/system/modprobe.py
@@ -97,13 +97,13 @@ def main():
     # Add/remove module as needed
     if args['state'] == 'present':
         if not present:
-            rc, _, err = module.run_command(['modprobe', args['name'], args['params']])
+            rc, _, err = module.run_command([module.get_bin_path('modprobe', True), args['name'], args['params']])
             if rc != 0:
                 module.fail_json(msg=err, **args)
             args['changed'] = True
     elif args['state'] == 'absent':
         if present:
-            rc, _, err = module.run_command(['rmmod', args['name']])
+            rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']])
             if rc != 0:
                 module.fail_json(msg=err, **args)
             args['changed'] = True
diff --git a/system/svc.py b/system/svc.py
new file mode 100755
index 00000000000..04749cfc134
--- /dev/null
+++ b/system/svc.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+DOCUMENTATION = '''
+---
+module: svc
+author: Brian Coca
+version_added: "1.9"
+short_description: Manage daemontools services.
+description:
+    - Controls daemontools services on remote hosts using the svc utility.
+options:
+    name:
+        required: true
+        description:
+            - Name of the service to manage.
+    state:
+        required: false
+        choices: [ started, stopped, restarted, killed, reloaded, once ]
+        description:
+            - C(started)/C(stopped) are idempotent actions that will not run
+              commands unless necessary. C(restarted) will always bounce the
+              svc (svc -t) and C(killed) will always bounce the svc (svc -k).
+              C(reloaded) will send a SIGUSR1 (svc -1).
+              C(once) will run a normally downed svc once (svc -o); this is
+              not really an idempotent operation.
+    downed:
+        required: false
+        choices: [ "yes", "no" ]
+        default: no
+        description:
+            - Should a 'down' file exist or not; if it exists, it disables auto startup.
+              Defaults to no. Downed does not imply stopped.
+    enabled:
+        required: false
+        choices: [ "yes", "no" ]
+        description:
+            - Whether the service is enabled or not; if disabled, it also implies stopped.
+              Note that a service can be enabled and downed (no auto restart).
+    service_dir:
+        required: false
+        default: /service
+        description:
+            - Directory svscan watches for services.
+    service_src:
+        required: false
+        description:
+            - Directory where services are defined; the source of symlinks to service_dir.
+''' + +EXAMPLES = ''' +# Example action to start svc dnscache, if not running + - svc: name=dnscache state=started + +# Example action to stop svc dnscache, if running + - svc: name=dnscache state=stopped + +# Example action to kill svc dnscache, in all cases + - svc : name=dnscache state=killed + +# Example action to restart svc dnscache, in all cases + - svc : name=dnscache state=restarted + +# Example action to reload svc dnscache, in all cases + - svc: name=dnscache state=reloaded + +# Example using alt svc directory location + - svc: name=dnscache state=reloaded service_dir=/var/service +''' + +import platform +import shlex + +def _load_dist_subclass(cls, *args, **kwargs): + ''' + Used for derivative implementations + ''' + subclass = None + + distro = kwargs['module'].params['distro'] + + # get the most specific superclass for this platform + if distro is not None: + for sc in cls.__subclasses__(): + if sc.distro is not None and sc.distro == distro: + subclass = sc + if subclass is None: + subclass = cls + + return super(cls, subclass).__new__(subclass) + +class Svc(object): + """ + Main class that handles daemontools, can be subclassed and overriden in case + we want to use a 'derivative' like encore, s6, etc + """ + + + #def __new__(cls, *args, **kwargs): + # return _load_dist_subclass(cls, args, kwargs) + + + + def __init__(self, module): + self.extra_paths = [ '/command', '/usr/local/bin' ] + self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state'] + + self.module = module + + self.name = module.params['name'] + self.service_dir = module.params['service_dir'] + self.service_src = module.params['service_src'] + self.enabled = None + self.downed = None + self.full_state = None + self.state = None + self.pid = None + self.duration = None + + self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths) + self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths) + self.svc_full = '/'.join([ self.service_dir, self.name ]) + self.src_full = '/'.join([ self.service_src, self.name ]) + + self.enabled = os.path.lexists(self.svc_full) + if self.enabled: + self.downed = os.path.lexists('%s/down' % self.svc_full) + self.get_status() + else: + self.downed = os.path.lexists('%s/down' % self.src_full) + self.state = 'stopped' + + + def enable(self): + if os.path.exists(self.src_full): + try: + os.symlink(self.src_full, self.svc_full) + except OSError, e: + self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e)) + else: + self.module.fail_json(msg="Could not find source for service to enable (%s)." 
% self.src_full)
+
+    def disable(self):
+        try:
+            os.unlink(self.svc_full)
+        except OSError, e:
+            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e))
+        self.execute_command([self.svc_cmd,'-dx',self.src_full])
+
+        src_log = '%s/log' % self.src_full
+        if os.path.exists(src_log):
+            self.execute_command([self.svc_cmd,'-dx',src_log])
+
+    def get_status(self):
+        (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])
+
+        if err is not None and err:
+            self.full_state = self.state = err
+        else:
+            self.full_state = out
+
+            m = re.search('\(pid (\d+)\)', out)
+            if m:
+                self.pid = m.group(1)
+
+            m = re.search('(\d+) seconds', out)
+            if m:
+                self.duration = m.group(1)
+
+            if re.search(' up ', out):
+                self.state = 'start'
+            elif re.search(' down ', out):
+                self.state = 'stopp'
+            else:
+                self.state = 'unknown'
+                return
+
+            if re.search(' want ', out):
+                self.state += 'ing'
+            else:
+                self.state += 'ed'
+
+    def start(self):
+        return self.execute_command([self.svc_cmd, '-u', self.svc_full])
+
+    def stopp(self):
+        return self.stop()
+
+    def stop(self):
+        return self.execute_command([self.svc_cmd, '-d', self.svc_full])
+
+    def once(self):
+        return self.execute_command([self.svc_cmd, '-o', self.svc_full])
+
+    def reload(self):
+        return self.execute_command([self.svc_cmd, '-1', self.svc_full])
+
+    def restart(self):
+        return self.execute_command([self.svc_cmd, '-t', self.svc_full])
+
+    def kill(self):
+        return self.execute_command([self.svc_cmd, '-k', self.svc_full])
+
+    def execute_command(self, cmd):
+        try:
+            (rc, out, err) = self.module.run_command(' '.join(cmd))
+        except Exception, e:
+            self.module.fail_json(msg="failed to execute: %s" % str(e))
+        return (rc, out, err)
+
+    def report(self):
+        self.get_status()
+        states = {}
+        for k in self.report_vars:
+            states[k] = self.__dict__[k]
+        return states
+
+# ===========================================
+# Main control flow
+
+def main():
+    module = AnsibleModule(
+        argument_spec = dict(
+            name = dict(required=True),
+            state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']),
+            enabled = dict(required=False, type='bool', choices=BOOLEANS),
+            downed = dict(required=False, type='bool', choices=BOOLEANS),
+            dist = dict(required=False, default='daemontools'),
+            service_dir = dict(required=False, default='/service'),
+            service_src = dict(required=False, default='/etc/service'),
+        ),
+        supports_check_mode=True,
+    )
+
+    state = module.params['state']
+    enabled = module.params['enabled']
+    downed = module.params['downed']
+
+    svc = Svc(module)
+    changed = False
+    orig_state = svc.report()
+
+    if enabled is not None and enabled != svc.enabled:
+        changed = True
+        if not module.check_mode:
+            try:
+                if enabled:
+                    svc.enable()
+                else:
+                    svc.disable()
+            except (OSError, IOError), e:
+                module.fail_json(msg="Could not change service link: %s" % str(e))
+
+    if state is not None and state != svc.state:
+        changed = True
+        if not module.check_mode:
+            # 'once' does not follow the '<verb>ed' naming pattern, so map it explicitly
+            if state == 'once':
+                svc.once()
+            else:
+                getattr(svc, state[:-2])()
+
+    if downed is not None and downed != svc.downed:
+        changed = True
+        if not module.check_mode:
+            d_file = "%s/down" % svc.svc_full
+            try:
+                if downed:
+                    open(d_file, "a").close()
+                else:
+                    os.unlink(d_file)
+            except (OSError, IOError), e:
+                module.fail_json(msg="Could not change downed file: %s " % (str(e)))
+
+    module.exit_json(changed=changed, svc=svc.report())
+
+
+# this is magic, not normal python include
+from ansible.module_utils.basic import *
+
+main()
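
For reference, a minimal runnable sketch of the state dispatch used in main() of svc.py above: each past-tense state maps to the method named by stripping the trailing 'ed', and 'once' is special-cased because it does not follow that pattern. FakeSvc and the strings its methods return are illustrative stand-ins, not part of the module.

class FakeSvc(object):
    """Stand-in for the Svc class; each method returns the svc
    invocation it would run, so the mapping can be inspected."""
    def start(self):   return 'svc -u'
    def stopp(self):   return 'svc -d'   # 'stopped' strips to 'stopp', aliased to stop() in the module
    def restart(self): return 'svc -t'
    def kill(self):    return 'svc -k'
    def reload(self):  return 'svc -1'
    def once(self):    return 'svc -o'

def dispatch(svc, state):
    # 'once'[:-2] would give 'on', so it is special-cased, matching the module
    if state == 'once':
        return svc.once()
    return getattr(svc, state[:-2])()

for s in ('started', 'stopped', 'restarted', 'killed', 'reloaded', 'once'):
    print '%-9s -> %s' % (s, dispatch(FakeSvc(), s))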