diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000000..d43c6b3b3fa --- /dev/null +++ b/.travis.yml @@ -0,0 +1,16 @@ +sudo: false +language: python +python: + - "2.7" +addons: + apt: + sources: + - deadsnakes + packages: + - python2.4 + - python2.6 +script: + - python2.4 -m compileall -fq -x 'cloud/|monitoring/zabbix.*\.py|/layman\.py|/maven_artifact\.py|clustering/consul.*\.py|notification/pushbullet\.py' . + - python2.6 -m compileall -fq . + - python2.7 -m compileall -fq . + - ./test-docs.sh extras diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e441a4e3527..38b95840a77 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,28 +1,37 @@ -Welcome To Ansible GitHub -========================= +Contributing to ansible-modules-extras +====================================== -Hi! Nice to see you here! +The Ansible Extras Modules are written and maintained by the Ansible community, according to the following contribution guidelines. + +If you'd like to contribute code +================================ + +Please see [this web page](http://docs.ansible.com/community.html) for information about the contribution process. Important license agreement information is also included on that page. + +If you'd like to contribute code to an existing module +====================================================== +Each module in Extras is maintained by the owner of that module; each module's owner is indicated in the documentation section of the module itself. Any pull request for a module that is given a +1 by the owner in the comments will be merged by the Ansible team. + +If you'd like to contribute a new module +======================================== +Ansible welcomes new modules. Please be certain that you've read the [module development guide and standards](http://docs.ansible.com/developing_modules.html) thoroughly before submitting your module. + +Each new module requires two current module owners to approve a new module for inclusion. 
The Ansible community reviews new modules as often as possible, but please be patient; there are a lot of new module submissions in the pipeline, and it takes time to evaluate a new module for its adherence to module standards. + +Once your module is accepted, you become responsible for maintenance of that module, which means responding to pull requests and issues in a reasonably timely manner. If you'd like to ask a question =============================== Please see [this web page ](http://docs.ansible.com/community.html) for community information, which includes pointers on how to ask questions on the [mailing lists](http://docs.ansible.com/community.html#mailing-list-information) and IRC. -The github issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, and that page has the pointers to those. - -If you'd like to contribute code -================================ - -Please see [this web page](http://docs.ansible.com/community.html) for information about the contribution process. Important license agreement information is also included on that page. +The Github issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, and that page has the pointers to those. If you'd like to file a bug =========================== -I'd also read the community page above, but in particular, make sure you copy [this issue template](https://github.com/ansible/ansible/blob/devel/ISSUE_TEMPLATE.md) into your ticket description. We have a friendly neighborhood bot that will remind you if you forget :) This template helps us organize tickets faster and prevents asking some repeated questions, so it's very helpful to us and we appreciate your help with it. 
+Read the community page above, but in particular, make sure you copy [this issue template](https://github.com/ansible/ansible/blob/devel/ISSUE_TEMPLATE.md) into your ticket description. We have a friendly neighborhood bot that will remind you if you forget :) This template helps us organize tickets faster and prevents asking some repeated questions, so it's very helpful to us and we appreciate your help with it. Also please make sure you are testing on the latest released version of Ansible or the development branch. Thanks! - - - diff --git a/REVIEWERS.md b/REVIEWERS.md new file mode 100644 index 00000000000..5ae08b59b02 --- /dev/null +++ b/REVIEWERS.md @@ -0,0 +1,160 @@ +New module reviewers +==================== +The following list represents all current Github module reviewers. It's currently comprised of all Ansible module authors, past and present. + +Two +1 votes by any of these module reviewers on a new module pull request will result in the inclusion of that module into Ansible Extras. + +Active +====== +"Adam Garside (@fabulops)" +"Adam Keech (@smadam813)" +"Adam Miller (@maxamillion)" +"Alex Coomans (@drcapulet)" +"Alexander Bulimov (@abulimov)" +"Alexander Saltanov (@sashka)" +"Alexander Winkler (@dermute)" +"Andrew de Quincey (@adq)" +"André Paramés (@andreparames)" +"Andy Hill (@andyhky)" +"Artūras `arturaz` Šlajus (@arturaz)" +"Augustus Kling (@AugustusKling)" +"BOURDEL Paul (@pb8226)" +"Balazs Pocze (@banyek)" +"Ben Whaley (@bwhaley)" +"Benno Joy (@bennojoy)" +"Bernhard Weitzhofer (@b6d)" +"Boyd Adamson (@brontitall)" +"Brad Olson (@bradobro)" +"Brian Coca (@bcoca)" +"Brice Burgess (@briceburg)" +"Bruce Pennypacker (@bpennypacker)" +"Carson Gee (@carsongee)" +"Chris Church (@cchurch)" +"Chris Hoffman (@chrishoffman)" +"Chris Long (@alcamie101)" +"Chris Schmidt (@chrisisbeef)" +"Christian Berendt (@berendt)" +"Christopher H. 
Laco (@claco)" +"Cristian van Ee (@DJMuggs)" +"Dag Wieers (@dagwieers)" +"Dane Summers (@dsummersl)" +"Daniel Jaouen (@danieljaouen)" +"Daniel Schep (@dschep)" +"Dariusz Owczarek (@dareko)" +"Darryl Stoflet (@dstoflet)" +"David CHANIAL (@davixx)" +"David Stygstra (@stygstra)" +"Derek Carter (@goozbach)" +"Dimitrios Tydeas Mengidis (@dmtrs)" +"Doug Luce (@dougluce)" +"Dylan Martin (@pileofrogs)" +"Elliott Foster (@elliotttf)" +"Eric Johnson (@erjohnso)" +"Evan Duffield (@scicoin-project)" +"Evan Kaufman (@EvanK)" +"Evgenii Terechkov (@evgkrsk)" +"Franck Cuny (@franckcuny)" +"Gareth Rushgrove (@garethr)" +"Hagai Kariti (@hkariti)" +"Hector Acosta (@hacosta)" +"Hiroaki Nakamura (@hnakamur)" +"Ivan Vanderbyl (@ivanvanderbyl)" +"Jakub Jirutka (@jirutka)" +"James Cammarata (@jimi-c)" +"James Laska (@jlaska)" +"James S. Martin (@jsmartin)" +"Jan-Piet Mens (@jpmens)" +"Jayson Vantuyl (@jvantuyl)" +"Jens Depuydt (@jensdepuydt)" +"Jeroen Hoekx (@jhoekx)" +"Jesse Keating (@j2sol)" +"Jim Dalton (@jsdalton)" +"Jim Richardson (@weaselkeeper)" +"Jimmy Tang (@jcftang)" +"Johan Wiren (@johanwiren)" +"John Dewey (@retr0h)" +"John Jarvis (@jarv)" +"John Whitbeck (@jwhitbeck)" +"Jon Hawkesworth (@jhawkesworth)" +"Jonas Pfenniger (@zimbatm)" +"Jonathan I. Davila (@defionscode)" +"Joseph Callen (@jcpowermac)" +"Kevin Carter (@cloudnull)" +"Lester Wade (@lwade)" +"Lorin Hochstein (@lorin)" +"Manuel Sousa (@manuel-sousa)" +"Mark Theunissen (@marktheunissen)" +"Matt Coddington (@mcodd)" +"Matt Hite (@mhite)" +"Matt Makai (@makaimc)" +"Matt Martz (@sivel)" +"Matt Wright (@mattupstate)" +"Matthew Vernon (@mcv21)" +"Matthew Williams (@mgwilliams)" +"Matthias Vogelgesang (@matze)" +"Max Riveiro (@kavu)" +"Michael Gregson (@mgregson)" +"Michael J. 
Schultz (@mjschultz)" +"Michael Warkentin (@mwarkentin)" +"Mischa Peters (@mischapeters)" +"Monty Taylor (@emonty)" +"Nandor Sivok (@dominis)" +"Nate Coraor (@natefoo)" +"Nate Kingsley (@nate-kingsley)" +"Nick Harring (@NickatEpic)" +"Patrick Callahan (@dirtyharrycallahan)" +"Patrick Ogenstad (@ogenstad)" +"Patrick Pelletier (@skinp)" +"Patrik Lundin (@eest)" +"Paul Durivage (@angstwad)" +"Pavel Antonov (@softzilla)" +"Pepe Barbe (@elventear)" +"Peter Mounce (@petemounce)" +"Peter Oliver (@mavit)" +"Peter Sprygada (@privateip)" +"Peter Tan (@tanpeter)" +"Philippe Makowski (@pmakowski)" +"Phillip Gentry, CX Inc (@pcgentry)" +"Quentin Stafford-Fraser (@quentinsf)" +"Ramon de la Fuente (@ramondelafuente)" +"Raul Melo (@melodous)" +"Ravi Bhure (@ravibhure)" +"René Moser (@resmo)" +"Richard Hoop (@rhoop)" +"Richard Isaacson (@risaacson)" +"Rick Mendes (@rickmendes)" +"Romeo Theriault (@romeotheriault)" +"Scott Anderson (@tastychutney)" +"Sebastian Kornehl (@skornehl)" +"Serge van Ginderachter (@srvg)" +"Sergei Antipov (@UnderGreen)" +"Seth Edwards (@sedward)" +"Silviu Dicu (@silviud)" +"Simon JAILLET (@jails)" +"Stephen Fromm (@sfromm)" +"Steve (@groks)" +"Steve Gargan (@sgargan)" +"Steve Smith (@tarka)" +"Takashi Someda (@tksmd)" +"Taneli Leppä (@rosmo)" +"Tim Bielawa (@tbielawa)" +"Tim Bielawa (@tbielawa)" +"Tim Mahoney (@timmahoney)" +"Timothy Appnel (@tima)" +"Tom Bamford (@tombamford)" +"Trond Hindenes (@trondhindenes)" +"Vincent Van der Kussen (@vincentvdk)" +"Vincent Viallet (@zbal)" +"WAKAYAMA Shirou (@shirou)" +"Will Thames (@willthames)" +"Willy Barro (@willybarro)" +"Xabier Larrakoetxea (@slok)" +"Yeukhon Wong (@yeukhon)" +"Zacharie Eakin (@zeekin)" +"berenddeboer (@berenddeboer)" +"bleader (@bleader)" +"curtis (@ccollicutt)" + +Retired +======= +None yet :) diff --git a/VERSION b/VERSION new file mode 100644 index 00000000000..53adb84c822 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.8.2 diff --git a/cloud/amazon/GUIDELINES.md b/cloud/amazon/GUIDELINES.md new 
file mode 100644 index 00000000000..ee5aea90ef7 --- /dev/null +++ b/cloud/amazon/GUIDELINES.md @@ -0,0 +1,88 @@ +Guidelines for AWS modules +-------------------------- + +Naming your module +================== + +Base the name of the module on the part of AWS that +you actually use. (A good rule of thumb is to take +whatever module you use with boto as a starting point). + +Don't further abbreviate names - if something is a well +known abbreviation due to it being a major component of +AWS, that's fine, but don't create new ones independently +(e.g. VPC, ELB, etc. are fine) + +Using boto +========== + +Wrap the `import` statements in a try block and fail the +module later on if the import fails + +``` +try: + import boto + import boto.module.that.you.use + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + module_specific_parameter=dict(), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') +``` + + +Try and keep backward compatibility with relatively recent +versions of boto. That means that if want to implement some +functionality that uses a new feature of boto, it should only +fail if that feature actually needs to be run, with a message +saying which version of boto is needed. + +Use feature testing (e.g. `hasattr('boto.module', 'shiny_new_method')`) +to check whether boto supports a feature rather than version checking + +e.g. 
from the `ec2` module: +``` +if boto_supports_profile_name_arg(ec2): + params['instance_profile_name'] = instance_profile_name +else: + if instance_profile_name is not None: + module.fail_json( + msg="instance_profile_name parameter requires Boto version 2.5.0 or higher") +``` + + +Connecting to AWS +================= + +For EC2 you can just use + +``` +ec2 = ec2_connect(module) +``` + +For other modules, you should use `get_aws_connection_info` and then +`connect_to_aws`. To connect to an example `xyz` service: + +``` +region, ec2_url, aws_connect_params = get_aws_connection_info(module) +xyz = connect_to_aws(boto.xyz, region, **aws_connect_params) +``` + +The reason for using `get_aws_connection_info` and `connect_to_aws` +(and even `ec2_connect` uses those under the hood) rather than doing it +yourself is that they handle some of the more esoteric connection +options such as security tokens and boto profiles. diff --git a/cloud/amazon/__init__.py b/cloud/amazon/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py new file mode 100644 index 00000000000..1c9313bbf7b --- /dev/null +++ b/cloud/amazon/cloudtrail.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = """ +--- +module: cloudtrail +short_description: manage CloudTrail creation and deletion +description: + - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. +version_added: "2.0" +author: + - "Ansible Core Team" + - "Ted Timmons" +requirements: + - "boto >= 2.21" +options: + state: + description: + - add or remove CloudTrail configuration. + required: true + choices: ['enabled', 'disabled'] + name: + description: + - name for given CloudTrail configuration. + - This is a primary key and is used to identify the configuration. + s3_bucket_prefix: + description: + - bucket to place CloudTrail in. + - this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html) + - required when state=enabled. + required: false + s3_key_prefix: + description: + - prefix to keys in bucket. A trailing slash is not necessary and will be removed. + required: false + include_global_events: + description: + - record API calls from global services such as IAM and STS? + required: false + default: false + choices: ["true", "false"] + + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_secret_key', 'secret_key' ] + version_added: "1.5" + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_access_key', 'access_key' ] + version_added: "1.5" + region: + description: + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. 
+ required: false + aliases: ['aws_region', 'ec2_region'] + version_added: "1.5" + +extends_documentation_fragment: aws +""" + +EXAMPLES = """ + - name: enable cloudtrail + local_action: cloudtrail + state=enabled name=main s3_bucket_name=ourbucket + s3_key_prefix=cloudtrail region=us-east-1 + + - name: enable cloudtrail with different configuration + local_action: cloudtrail + state=enabled name=main s3_bucket_name=ourbucket2 + s3_key_prefix='' region=us-east-1 + + - name: remove cloudtrail + local_action: cloudtrail state=disabled name=main region=us-east-1 +""" + +HAS_BOTO = False +try: + import boto + import boto.cloudtrail + from boto.regioninfo import RegionInfo + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +class CloudTrailManager: + """Handles cloudtrail configuration""" + + def __init__(self, module, region=None, **aws_connect_params): + self.module = module + self.region = region + self.aws_connect_params = aws_connect_params + self.changed = False + + try: + self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params) + except boto.exception.NoAuthHandlerFound, e: + self.module.fail_json(msg=str(e)) + + def view_status(self, name): + return self.conn.get_trail_status(name) + + def view(self, name): + ret = self.conn.describe_trails(trail_name_list=[name]) + trailList = ret.get('trailList', []) + if len(trailList) == 1: + return trailList[0] + return None + + def exists(self, name=None): + ret = self.view(name) + if ret: + return True + return False + + def enable_logging(self, name): + '''Turn on logging for a cloudtrail that already exists. Throws Exception on error.''' + self.conn.start_logging(name) + + + def enable(self, **create_args): + return self.conn.create_trail(**create_args) + + def update(self, **create_args): + return self.conn.update_trail(**create_args) + + def delete(self, name): + '''Delete a given cloudtrial configuration. 
Throws Exception on error.''' + self.conn.delete_trail(name) + + + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state={'required': True, 'choices': ['enabled', 'disabled'] }, + name={'required': True, 'type': 'str' }, + s3_bucket_name={'required': False, 'type': 'str' }, + s3_key_prefix={'default':'', 'required': False, 'type': 'str' }, + include_global_events={'default':True, 'required': False, 'type': 'bool' }, + )) + required_together = ( ['state', 's3_bucket_name'] ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together) + + if not HAS_BOTO: + module.fail_json(msg='boto is required.') + + ec2_url, access_key, secret_key, region = get_ec2_creds(module) + aws_connect_params = dict(aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + + if not region: + module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") + + ct_name = module.params['name'] + s3_bucket_name = module.params['s3_bucket_name'] + # remove trailing slash from the key prefix, really messes up the key structure. + s3_key_prefix = module.params['s3_key_prefix'].rstrip('/') + include_global_events = module.params['include_global_events'] + + #if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: + # module.fail_json(msg="ELBs are required for registration or viewing") + + cf_man = CloudTrailManager(module, region=region, **aws_connect_params) + + results = { 'changed': False } + if module.params['state'] == 'enabled': + results['exists'] = cf_man.exists(name=ct_name) + if results['exists']: + results['view'] = cf_man.view(ct_name) + # only update if the values have changed. 
+ if results['view']['S3BucketName'] != s3_bucket_name or \ + results['view']['S3KeyPrefix'] != s3_key_prefix or \ + results['view']['IncludeGlobalServiceEvents'] != include_global_events: + if not module.check_mode: + results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events) + results['changed'] = True + else: + if not module.check_mode: + # doesn't exist. create it. + results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events) + results['changed'] = True + + # given cloudtrail should exist now. Enable the logging. + results['view_status'] = cf_man.view_status(ct_name) + results['was_logging_enabled'] = results['view_status'].get('IsLogging', False) + if not results['was_logging_enabled']: + if not module.check_mode: + cf_man.enable_logging(ct_name) + results['logging_enabled'] = True + results['changed'] = True + + # delete the cloudtrai + elif module.params['state'] == 'disabled': + # check to see if it exists before deleting. + results['exists'] = cf_man.exists(name=ct_name) + if results['exists']: + # it exists, so we should delete it and mark changed. 
+ if not module.check_mode: + cf_man.delete(ct_name) + results['changed'] = True + + module.exit_json(**results) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() diff --git a/cloud/amazon/dynamodb_table.py b/cloud/amazon/dynamodb_table.py new file mode 100644 index 00000000000..c97ff6f0be0 --- /dev/null +++ b/cloud/amazon/dynamodb_table.py @@ -0,0 +1,286 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: dynamodb_table +short_description: Create, update or delete AWS Dynamo DB tables. +description: + - Create or delete AWS Dynamo DB tables. + - Can update the provisioned throughput on existing tables. + - Returns the status of the specified table. +version_added: "2.0" +author: Alan Loi (@loia) +version_added: "2.0" +requirements: + - "boto >= 2.13.2" +options: + state: + description: + - Create or delete the table + required: false + choices: ['present', 'absent'] + default: 'present' + name: + description: + - Name of the table. + required: true + hash_key_name: + description: + - Name of the hash key. + - Required when C(state=present). + required: false + default: null + hash_key_type: + description: + - Type of the hash key. 
+ required: false + choices: ['STRING', 'NUMBER', 'BINARY'] + default: 'STRING' + range_key_name: + description: + - Name of the range key. + required: false + default: null + range_key_type: + description: + - Type of the range key. + required: false + choices: ['STRING', 'NUMBER', 'BINARY'] + default: 'STRING' + read_capacity: + description: + - Read throughput capacity (units) to provision. + required: false + default: 1 + write_capacity: + description: + - Write throughput capacity (units) to provision. + required: false + default: 1 + region: + description: + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. + required: false + aliases: ['aws_region', 'ec2_region'] + +extends_documentation_fragment: aws +""" + +EXAMPLES = ''' +# Create dynamo table with hash and range primary key +- dynamodb_table: + name: my-table + region: us-east-1 + hash_key_name: id + hash_key_type: STRING + range_key_name: create_time + range_key_type: NUMBER + read_capacity: 2 + write_capacity: 2 + +# Update capacity on existing dynamo table +- dynamodb_table: + name: my-table + region: us-east-1 + read_capacity: 10 + write_capacity: 10 + +# Delete dynamo table +- dynamodb_table: + name: my-table + region: us-east-1 + state: absent +''' + +RETURN = ''' +table_status: + description: The current status of the table. 
+ returned: success + type: string + sample: ACTIVE +''' + +try: + import boto + import boto.dynamodb2 + from boto.dynamodb2.table import Table + from boto.dynamodb2.fields import HashKey, RangeKey + from boto.dynamodb2.types import STRING, NUMBER, BINARY + from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError + HAS_BOTO = True + +except ImportError: + HAS_BOTO = False + + +DYNAMO_TYPE_MAP = { + 'STRING': STRING, + 'NUMBER': NUMBER, + 'BINARY': BINARY +} + + +def create_or_update_dynamo_table(connection, module): + table_name = module.params.get('name') + hash_key_name = module.params.get('hash_key_name') + hash_key_type = module.params.get('hash_key_type') + range_key_name = module.params.get('range_key_name') + range_key_type = module.params.get('range_key_type') + read_capacity = module.params.get('read_capacity') + write_capacity = module.params.get('write_capacity') + + schema = [ + HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type)), + RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type)) + ] + throughput = { + 'read': read_capacity, + 'write': write_capacity + } + + result = dict( + region=module.params.get('region'), + table_name=table_name, + hash_key_name=hash_key_name, + hash_key_type=hash_key_type, + range_key_name=range_key_name, + range_key_type=range_key_type, + read_capacity=read_capacity, + write_capacity=write_capacity, + ) + + try: + table = Table(table_name, connection=connection) + + if dynamo_table_exists(table): + result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode) + else: + if not module.check_mode: + Table.create(table_name, connection=connection, schema=schema, throughput=throughput) + result['changed'] = True + + if not module.check_mode: + result['table_status'] = table.describe()['Table']['TableStatus'] + + except BotoServerError: + result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc() + 
module.fail_json(**result) + else: + module.exit_json(**result) + + +def delete_dynamo_table(connection, module): + table_name = module.params.get('name') + + result = dict( + region=module.params.get('region'), + table_name=table_name, + ) + + try: + table = Table(table_name, connection=connection) + + if dynamo_table_exists(table): + if not module.check_mode: + table.delete() + result['changed'] = True + + else: + result['changed'] = False + + except BotoServerError: + result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc() + module.fail_json(**result) + else: + module.exit_json(**result) + + +def dynamo_table_exists(table): + try: + table.describe() + return True + + except JSONResponseError, e: + if e.message and e.message.startswith('Requested resource not found'): + return False + else: + raise e + + +def update_dynamo_table(table, throughput=None, check_mode=False): + table.describe() # populate table details + + if has_throughput_changed(table, throughput): + if not check_mode: + return table.update(throughput=throughput) + else: + return True + + return False + + +def has_throughput_changed(table, new_throughput): + if not new_throughput: + return False + + return new_throughput['read'] != table.throughput['read'] or \ + new_throughput['write'] != table.throughput['write'] + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['present', 'absent']), + name=dict(required=True, type='str'), + hash_key_name=dict(required=True, type='str'), + hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), + range_key_name=dict(type='str'), + range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), + read_capacity=dict(default=1, type='int'), + write_capacity=dict(default=1, type='int'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + if not HAS_BOTO: + 
module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg='region must be specified') + + try: + connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params) + + except (NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + + state = module.params.get('state') + if state == 'present': + create_or_update_dynamo_table(connection, module) + elif state == 'absent': + delete_dynamo_table(connection, module) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_ami_copy.py b/cloud/amazon/ec2_ami_copy.py new file mode 100644 index 00000000000..ff9bde88022 --- /dev/null +++ b/cloud/amazon/ec2_ami_copy.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_ami_copy +short_description: copies AMI between AWS regions, return new image id +description: + - Copies AMI from a source region to a destination region. 
This module has a dependency on python-boto >= 2.5 +version_added: "2.0" +options: + source_region: + description: + - the source region that AMI should be copied from + required: true + region: + description: + - the destination region that AMI should be copied to + required: true + aliases: ['aws_region', 'ec2_region', 'dest_region'] + source_image_id: + description: + - the id of the image in source region that should be copied + required: true + name: + description: + - The name of the new image to copy + required: false + default: null + description: + description: + - An optional human-readable string describing the contents and purpose of the new AMI. + required: false + default: null + wait: + description: + - wait for the copied AMI to be in state 'available' before returning. + required: false + default: "no" + choices: [ "yes", "no" ] + wait_timeout: + description: + - how long before wait gives up, in seconds + required: false + default: 1200 + tags: + description: + - a hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}' + required: false + default: null + +author: Amir Moulavi +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Basic AMI Copy +- local_action: + module: ec2_ami_copy + source_region: eu-west-1 + dest_region: us-east-1 + source_image_id: ami-xxxxxxx + name: SuperService-new-AMI + description: latest patch + tags: '{"Name":"SuperService-new-AMI", "type":"SuperService"}' + wait: yes + register: image_id +''' + + +import sys +import time + +try: + import boto + import boto.ec2 + from boto.vpc import VPCConnection + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + +def copy_image(module, ec2): + """ + Copies an AMI + + module : AnsibleModule object + ec2: authenticated ec2 connection object + """ + + source_region = module.params.get('source_region') + source_image_id = 
module.params.get('source_image_id') + name = module.params.get('name') + description = module.params.get('description') + tags = module.params.get('tags') + wait_timeout = int(module.params.get('wait_timeout')) + wait = module.params.get('wait') + + try: + params = {'source_region': source_region, + 'source_image_id': source_image_id, + 'name': name, + 'description': description + } + + image_id = ec2.copy_image(**params).image_id + except boto.exception.BotoServerError, e: + module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) + + img = wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait) + + img = wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait) + + register_tags_if_any(module, ec2, tags, image_id) + + module.exit_json(msg="AMI copy operation complete", image_id=image_id, state=img.state, changed=True) + + +# register tags to the copied AMI in dest_region +def register_tags_if_any(module, ec2, tags, image_id): + if tags: + try: + ec2.create_tags([image_id], tags) + except Exception as e: + module.fail_json(msg=str(e)) + + +# wait here until the image is copied (i.e. the state becomes available +def wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait): + wait_timeout = time.time() + wait_timeout + while wait and wait_timeout > time.time() and (img is None or img.state != 'available'): + img = ec2.get_image(image_id) + time.sleep(3) + if wait and wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="timed out waiting for image to be copied") + return img + + +# wait until the image is recognized. 
+def wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait): + for i in range(wait_timeout): + try: + return ec2.get_image(image_id) + except boto.exception.EC2ResponseError, e: + # This exception we expect initially right after registering the copy with EC2 API + if 'InvalidAMIID.NotFound' in e.error_code and wait: + time.sleep(1) + else: + # On any other exception we should fail + module.fail_json( + msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help: " + str( + e)) + else: + module.fail_json(msg="timed out waiting for image to be recognized") + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + source_region=dict(required=True), + source_image_id=dict(required=True), + name=dict(), + description=dict(default=""), + wait=dict(type='bool', default=False), + wait_timeout=dict(default=1200), + tags=dict(type='dict'))) + + module = AnsibleModule(argument_spec=argument_spec) + + try: + ec2 = ec2_connect(module) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + try: + region, ec2_url, boto_params = get_aws_connection_info(module) + vpc = connect_to_aws(boto.vpc, region, **boto_params) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + + if not region: + module.fail_json(msg="region must be specified") + + copy_image(module, ec2) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() + diff --git a/cloud/amazon/ec2_eni.py b/cloud/amazon/ec2_eni.py new file mode 100644 index 00000000000..9e878e7d558 --- /dev/null +++ b/cloud/amazon/ec2_eni.py @@ -0,0 +1,404 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_eni +short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance +description: + - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID is provided, an attempt is made to update the existing ENI. By passing 'None' as the instance_id, an ENI can be detached from an instance. +version_added: "2.0" +author: Rob White, wimnat [at] gmail.com, @wimnat +options: + eni_id: + description: + - The ID of the ENI + required: false + default: null + instance_id: + description: + - Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'. + required: false + default: null + private_ip_address: + description: + - Private IP address. + required: false + default: null + subnet_id: + description: + - ID of subnet in which to create the ENI. Only required when state=present. + required: true + description: + description: + - Optional description of the ENI. + required: false + default: null + security_groups: + description: + - List of security groups associated with the interface. Only used when state=present. + required: false + default: null + state: + description: + - Create or delete ENI. + required: false + default: present + choices: [ 'present', 'absent' ] + device_index: + description: + - The index of the device for the network interface attachment on the instance. + required: false + default: 0 + force_detach: + description: + - Force detachment of the interface. 
This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent. + required: false + default: no + delete_on_termination: + description: + - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation. + required: false + source_dest_check: + description: + - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation. + required: false +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Create an ENI. As no security group is defined, ENI will be created in default security group +- ec2_eni: + private_ip_address: 172.31.0.20 + subnet_id: subnet-xxxxxxxx + state: present + +# Create an ENI and attach it to an instance +- ec2_eni: + instance_id: i-xxxxxxx + device_index: 1 + private_ip_address: 172.31.0.20 + subnet_id: subnet-xxxxxxxx + state: present + +# Destroy an ENI, detaching it from any instance if necessary +- ec2_eni: + eni_id: eni-xxxxxxx + force_detach: yes + state: absent + +# Update an ENI +- ec2_eni: + eni_id: eni-xxxxxxx + description: "My new description" + state: present + +# Detach an ENI from an instance +- ec2_eni: + eni_id: eni-xxxxxxx + instance_id: None + state: present + +### Delete an interface on termination +# First create the interface +- ec2_eni: + instance_id: i-xxxxxxx + device_index: 1 + private_ip_address: 172.31.0.20 + subnet_id: subnet-xxxxxxxx + state: present + register: eni + +# Modify the interface to enable the delete_on_terminaton flag +- ec2_eni: + eni_id: {{ "eni.interface.id" }} + delete_on_termination: true + +''' + +import time +import xml.etree.ElementTree as ET +import re + +try: + import boto.ec2 + from 
boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +def get_error_message(xml_string): + + root = ET.fromstring(xml_string) + for message in root.findall('.//Message'): + return message.text + + +def get_eni_info(interface): + + interface_info = {'id': interface.id, + 'subnet_id': interface.subnet_id, + 'vpc_id': interface.vpc_id, + 'description': interface.description, + 'owner_id': interface.owner_id, + 'status': interface.status, + 'mac_address': interface.mac_address, + 'private_ip_address': interface.private_ip_address, + 'source_dest_check': interface.source_dest_check, + 'groups': dict((group.id, group.name) for group in interface.groups), + } + + if interface.attachment is not None: + interface_info['attachment'] = {'attachment_id': interface.attachment.id, + 'instance_id': interface.attachment.instance_id, + 'device_index': interface.attachment.device_index, + 'status': interface.attachment.status, + 'attach_time': interface.attachment.attach_time, + 'delete_on_termination': interface.attachment.delete_on_termination, + } + + return interface_info + +def wait_for_eni(eni, status): + + while True: + time.sleep(3) + eni.update() + # If the status is detached we just need attachment to disappear + if eni.attachment is None: + if status == "detached": + break + else: + if status == "attached" and eni.attachment.status == "attached": + break + + +def create_eni(connection, module): + + instance_id = module.params.get("instance_id") + if instance_id == 'None': + instance_id = None + do_detach = True + else: + do_detach = False + device_index = module.params.get("device_index") + subnet_id = module.params.get('subnet_id') + private_ip_address = module.params.get('private_ip_address') + description = module.params.get('description') + security_groups = module.params.get('security_groups') + changed = False + + try: + eni = compare_eni(connection, module) + if eni is None: + eni = 
connection.create_network_interface(subnet_id, private_ip_address, description, security_groups) + if instance_id is not None: + try: + eni.attach(instance_id, device_index) + except BotoServerError as ex: + eni.delete() + raise + changed = True + # Wait to allow creation / attachment to finish + wait_for_eni(eni, "attached") + eni.update() + + except BotoServerError as e: + module.fail_json(msg=get_error_message(e.args[2])) + + module.exit_json(changed=changed, interface=get_eni_info(eni)) + + +def modify_eni(connection, module): + + eni_id = module.params.get("eni_id") + instance_id = module.params.get("instance_id") + if instance_id == 'None': + instance_id = None + do_detach = True + else: + do_detach = False + device_index = module.params.get("device_index") + subnet_id = module.params.get('subnet_id') + private_ip_address = module.params.get('private_ip_address') + description = module.params.get('description') + security_groups = module.params.get('security_groups') + force_detach = module.params.get("force_detach") + source_dest_check = module.params.get("source_dest_check") + delete_on_termination = module.params.get("delete_on_termination") + changed = False + + + try: + # Get the eni with the eni_id specified + eni_result_set = connection.get_all_network_interfaces(eni_id) + eni = eni_result_set[0] + if description is not None: + if eni.description != description: + connection.modify_network_interface_attribute(eni.id, "description", description) + changed = True + if security_groups is not None: + if sorted(get_sec_group_list(eni.groups)) != sorted(security_groups): + connection.modify_network_interface_attribute(eni.id, "groupSet", security_groups) + changed = True + if source_dest_check is not None: + if eni.source_dest_check != source_dest_check: + connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check) + changed = True + if delete_on_termination is not None: + if eni.attachment is not None: + if 
eni.attachment.delete_on_termination is not delete_on_termination: + connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id) + changed = True + else: + module.fail_json(msg="Can not modify delete_on_termination as the interface is not attached") + if eni.attachment is not None and instance_id is None and do_detach is True: + eni.detach(force_detach) + wait_for_eni(eni, "detached") + changed = True + else: + if instance_id is not None: + eni.attach(instance_id, device_index) + wait_for_eni(eni, "attached") + changed = True + + except BotoServerError as e: + print e + module.fail_json(msg=get_error_message(e.args[2])) + + eni.update() + module.exit_json(changed=changed, interface=get_eni_info(eni)) + + +def delete_eni(connection, module): + + eni_id = module.params.get("eni_id") + force_detach = module.params.get("force_detach") + + try: + eni_result_set = connection.get_all_network_interfaces(eni_id) + eni = eni_result_set[0] + + if force_detach is True: + if eni.attachment is not None: + eni.detach(force_detach) + # Wait to allow detachment to finish + wait_for_eni(eni, "detached") + eni.update() + eni.delete() + changed = True + else: + eni.delete() + changed = True + + module.exit_json(changed=changed) + except BotoServerError as e: + msg = get_error_message(e.args[2]) + regex = re.compile('The networkInterface ID \'.*\' does not exist') + if regex.search(msg) is not None: + module.exit_json(changed=False) + else: + module.fail_json(msg=get_error_message(e.args[2])) + +def compare_eni(connection, module): + + eni_id = module.params.get("eni_id") + subnet_id = module.params.get('subnet_id') + private_ip_address = module.params.get('private_ip_address') + description = module.params.get('description') + security_groups = module.params.get('security_groups') + + try: + all_eni = connection.get_all_network_interfaces(eni_id) + + for eni in all_eni: + remote_security_groups = get_sec_group_list(eni.groups) 
+ if (eni.subnet_id == subnet_id) and (eni.private_ip_address == private_ip_address) and (eni.description == description) and (remote_security_groups == security_groups): + return eni + + except BotoServerError as e: + module.fail_json(msg=get_error_message(e.args[2])) + + return None + +def get_sec_group_list(groups): + + # Build list of remote security groups + remote_security_groups = [] + for group in groups: + remote_security_groups.append(group.id.encode()) + + return remote_security_groups + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + eni_id = dict(default=None), + instance_id = dict(default=None), + private_ip_address = dict(), + subnet_id = dict(), + description = dict(), + security_groups = dict(type='list'), + device_index = dict(default=0, type='int'), + state = dict(default='present', choices=['present', 'absent']), + force_detach = dict(default='no', type='bool'), + source_dest_check = dict(default=None, type='bool'), + delete_on_termination = dict(default=None, type='bool') + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.ec2, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + state = module.params.get("state") + eni_id = module.params.get("eni_id") + + if state == 'present': + if eni_id is None: + if module.params.get("subnet_id") is None: + module.fail_json(msg="subnet_id must be specified when state=present") + create_eni(connection, module) + else: + modify_eni(connection, module) + elif state == 'absent': + if eni_id is None: + module.fail_json(msg="eni_id must be specified") + else: + delete_eni(connection, module) + +from ansible.module_utils.basic 
import * +from ansible.module_utils.ec2 import * + +# this is magic, see lib/ansible/module_common.py +#<> + +main() diff --git a/cloud/amazon/ec2_eni_facts.py b/cloud/amazon/ec2_eni_facts.py new file mode 100644 index 00000000000..981358c33af --- /dev/null +++ b/cloud/amazon/ec2_eni_facts.py @@ -0,0 +1,135 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_eni_facts +short_description: Gather facts about ec2 ENI interfaces in AWS +description: + - Gather facts about ec2 ENI interfaces in AWS +version_added: "2.0" +author: "Rob White (@wimnat)" +options: + eni_id: + description: + - The ID of the ENI. Pass this option to gather facts about a particular ENI, otherwise, all ENIs are returned. + required: false + default: null +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
+ +# Gather facts about all ENIs +- ec2_eni_facts: + +# Gather facts about a particular ENI +- ec2_eni_facts: + eni_id: eni-xxxxxxx + +''' + +import xml.etree.ElementTree as ET + +try: + import boto.ec2 + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +def get_error_message(xml_string): + + root = ET.fromstring(xml_string) + for message in root.findall('.//Message'): + return message.text + + +def get_eni_info(interface): + + interface_info = {'id': interface.id, + 'subnet_id': interface.subnet_id, + 'vpc_id': interface.vpc_id, + 'description': interface.description, + 'owner_id': interface.owner_id, + 'status': interface.status, + 'mac_address': interface.mac_address, + 'private_ip_address': interface.private_ip_address, + 'source_dest_check': interface.source_dest_check, + 'groups': dict((group.id, group.name) for group in interface.groups), + } + + if interface.attachment is not None: + interface_info['attachment'] = {'attachment_id': interface.attachment.id, + 'instance_id': interface.attachment.instance_id, + 'device_index': interface.attachment.device_index, + 'status': interface.attachment.status, + 'attach_time': interface.attachment.attach_time, + 'delete_on_termination': interface.attachment.delete_on_termination, + } + + return interface_info + + +def list_eni(connection, module): + + eni_id = module.params.get("eni_id") + interface_dict_array = [] + + try: + all_eni = connection.get_all_network_interfaces(eni_id) + except BotoServerError as e: + module.fail_json(msg=get_error_message(e.args[2])) + + for interface in all_eni: + interface_dict_array.append(get_eni_info(interface)) + + module.exit_json(interfaces=interface_dict_array) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + eni_id = dict(default=None) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, 
ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.ec2, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + list_eni(connection, module) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +# this is magic, see lib/ansible/module_common.py +#<> + +main() diff --git a/cloud/amazon/ec2_vpc_igw.py b/cloud/amazon/ec2_vpc_igw.py new file mode 100644 index 00000000000..63be48248ef --- /dev/null +++ b/cloud/amazon/ec2_vpc_igw.py @@ -0,0 +1,159 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_vpc_igw +short_description: Manage an AWS VPC Internet gateway +description: + - Manage an AWS VPC Internet gateway +version_added: "2.0" +author: Robert Estelle, @erydo +options: + vpc_id: + description: + - The VPC ID for the VPC in which to manage the Internet Gateway. + required: true + default: null + state: + description: + - Create or terminate the IGW + required: false + default: present +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Ensure that the VPC has an Internet Gateway. 
+# The Internet Gateway ID is can be accessed via {{igw.gateway_id}} for use +# in setting up NATs etc. + local_action: + module: ec2_vpc_igw + vpc_id: {{vpc.vpc_id}} + region: {{vpc.vpc.region}} + state: present + register: igw +''' + + +import sys # noqa + +try: + import boto.ec2 + import boto.vpc + from boto.exception import EC2ResponseError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + if __name__ != '__main__': + raise + + +class AnsibleIGWException(Exception): + pass + + +def ensure_igw_absent(vpc_conn, vpc_id, check_mode): + igws = vpc_conn.get_all_internet_gateways( + filters={'attachment.vpc-id': vpc_id}) + + if not igws: + return {'changed': False} + + if check_mode: + return {'changed': True} + + for igw in igws: + try: + vpc_conn.detach_internet_gateway(igw.id, vpc_id) + vpc_conn.delete_internet_gateway(igw.id) + except EC2ResponseError as e: + raise AnsibleIGWException( + 'Unable to delete Internet Gateway, error: {0}'.format(e)) + + return {'changed': True} + + +def ensure_igw_present(vpc_conn, vpc_id, check_mode): + igws = vpc_conn.get_all_internet_gateways( + filters={'attachment.vpc-id': vpc_id}) + + if len(igws) > 1: + raise AnsibleIGWException( + 'EC2 returned more than one Internet Gateway for VPC {0}, aborting' + .format(vpc_id)) + + if igws: + return {'changed': False, 'gateway_id': igws[0].id} + else: + if check_mode: + return {'changed': True, 'gateway_id': None} + + try: + igw = vpc_conn.create_internet_gateway() + vpc_conn.attach_internet_gateway(igw.id, vpc_id) + return {'changed': True, 'gateway_id': igw.id} + except EC2ResponseError as e: + raise AnsibleIGWException( + 'Unable to create Internet Gateway, error: {0}'.format(e)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + vpc_id = dict(required=True), + state = dict(choices=['present', 'absent'], default='present') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + if not 
HAS_BOTO: + module.fail_json(msg='boto is required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.ec2, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + vpc_id = module.params.get('vpc_id') + state = module.params.get('state', 'present') + + try: + if state == 'present': + result = ensure_igw_present(connection, vpc_id, check_mode=module.check_mode) + elif state == 'absent': + result = ensure_igw_absent(connection, vpc_id, check_mode=module.check_mode) + except AnsibleIGWException as e: + module.fail_json(msg=str(e)) + + module.exit_json(**result) + +from ansible.module_utils.basic import * # noqa +from ansible.module_utils.ec2 import * # noqa + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_win_password.py b/cloud/amazon/ec2_win_password.py new file mode 100644 index 00000000000..b9cb029499a --- /dev/null +++ b/cloud/amazon/ec2_win_password.py @@ -0,0 +1,91 @@ +#!/usr/bin/python + +DOCUMENTATION = ''' +--- +module: ec2_win_password +short_description: gets the default administrator password for ec2 windows instances +description: + - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto. +version_added: "2.0" +author: "Rick Mendes (@rickmendes)" +options: + instance_id: + description: + - The instance id to get the password data from. + required: true + key_file: + description: + - path to the file containing the key pair used on the instance + required: true + region: + description: + - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. 
+ required: false + default: null + aliases: [ 'aws_region', 'ec2_region' ] + +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Example of getting a password +tasks: +- name: get the Administrator password + ec2_win_password: + profile: my-boto-profile + instance_id: i-XXXXXX + region: us-east-1 + key_file: "~/aws-creds/my_test_key.pem" +''' + +from base64 import b64decode +from os.path import expanduser +from Crypto.Cipher import PKCS1_v1_5 +from Crypto.PublicKey import RSA + +try: + import boto.ec2 + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + instance_id = dict(required=True), + key_file = dict(required=True), + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='Boto required for this module.') + + instance_id = module.params.get('instance_id') + key_file = expanduser(module.params.get('key_file')) + + ec2 = ec2_connect(module) + + data = ec2.get_password_data(instance_id) + decoded = b64decode(data) + + f = open(key_file, 'r') + key = RSA.importKey(f.read()) + cipher = PKCS1_v1_5.new(key) + sentinel = 'password decryption failed!!!' 
+ + try: + decrypted = cipher.decrypt(decoded, sentinel) + except ValueError as e: + decrypted = None + + if decrypted == None: + module.exit_json(win_password='', changed=False) + else: + module.exit_json(win_password=decrypted, changed=True) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() diff --git a/cloud/cloudstack/__init__.py b/cloud/cloudstack/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py new file mode 100644 index 00000000000..cc487af5e51 --- /dev/null +++ b/cloud/cloudstack/cs_account.py @@ -0,0 +1,410 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_account +short_description: Manages account on Apache CloudStack based clouds. +description: + - Create, disable, lock, enable and remove accounts. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + name: + description: + - Name of account. + required: true + username: + description: + - Username of the user to be created if account did not exist. + - Required on C(state=present). + required: false + default: null + password: + description: + - Password of the user to be created if account did not exist. + - Required on C(state=present). 
+ required: false + default: null + first_name: + description: + - First name of the user to be created if account did not exist. + - Required on C(state=present). + required: false + default: null + last_name: + description: + - Last name of the user to be created if account did not exist. + - Required on C(state=present). + required: false + default: null + email: + description: + - Email of the user to be created if account did not exist. + - Required on C(state=present). + required: false + default: null + timezone: + description: + - Timezone of the user to be created if account did not exist. + required: false + default: null + network_domain: + description: + - Network domain of the account. + required: false + default: null + account_type: + description: + - Type of the account. + required: false + default: 'user' + choices: [ 'user', 'root_admin', 'domain_admin' ] + domain: + description: + - Domain the account is related to. + required: false + default: 'ROOT' + state: + description: + - State of the account. + required: false + default: 'present' + choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked' ] + poll_async: + description: + - Poll async jobs until job has finished. 
+ required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# create an account in domain 'CUSTOMERS' +local_action: + module: cs_account + name: customer_xy + username: customer_xy + password: S3Cur3 + last_name: Doe + first_name: John + email: john.doe@example.com + domain: CUSTOMERS + +# Lock an existing account in domain 'CUSTOMERS' +local_action: + module: cs_account + name: customer_xy + domain: CUSTOMERS + state: locked + +# Disable an existing account in domain 'CUSTOMERS' +local_action: + module: cs_account + name: customer_xy + domain: CUSTOMERS + state: disabled + +# Enable an existing account in domain 'CUSTOMERS' +local_action: + module: cs_account + name: customer_xy + domain: CUSTOMERS + state: enabled + +# Remove an account in domain 'CUSTOMERS' +local_action: + module: cs_account + name: customer_xy + domain: CUSTOMERS + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the account. + returned: success + type: string + sample: linus@example.com +account_type: + description: Type of the account. + returned: success + type: string + sample: user +account_state: + description: State of the account. + returned: success + type: string + sample: enabled +network_domain: + description: Network domain of the account. + returned: success + type: string + sample: example.local +domain: + description: Domain the account is related. 
+ returned: success + type: string + sample: ROOT +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackAccount(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.account = None + self.account_types = { + 'user': 0, + 'root_admin': 1, + 'domain_admin': 2, + } + + + def get_account_type(self): + account_type = self.module.params.get('account_type') + return self.account_types[account_type] + + + def get_account(self): + if not self.account: + args = {} + args['listall'] = True + args['domainid'] = self.get_domain('id') + accounts = self.cs.listAccounts(**args) + if accounts: + account_name = self.module.params.get('name') + for a in accounts['account']: + if account_name in [ a['name'] ]: + self.account = a + break + + return self.account + + + def enable_account(self): + account = self.get_account() + if not account: + self.module.fail_json(msg="Failed: account not present") + + if account['state'].lower() != 'enabled': + self.result['changed'] = True + args = {} + args['id'] = account['id'] + args['account'] = self.module.params.get('name') + args['domainid'] = self.get_domain('id') + if not self.module.check_mode: + res = self.cs.enableAccount(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + account = res['account'] + return account + + + def lock_account(self): + return self.lock_or_disable_account(lock=True) + + + def disable_account(self): + return self.lock_or_disable_account() + + + def lock_or_disable_account(self, lock=False): + account = self.get_account() + if not account: + self.module.fail_json(msg="Failed: account not present") + + # we need to enable the account to lock it. 
+ if lock and account['state'].lower() == 'disabled': + account = self.enable_account() + + if lock and account['state'].lower() != 'locked' \ + or not lock and account['state'].lower() != 'disabled': + self.result['changed'] = True + args = {} + args['id'] = account['id'] + args['account'] = self.module.params.get('name') + args['domainid'] = self.get_domain('id') + args['lock'] = lock + if not self.module.check_mode: + account = self.cs.disableAccount(**args) + + if 'errortext' in account: + self.module.fail_json(msg="Failed: '%s'" % account['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + account = self._poll_job(account, 'account') + return account + + + def present_account(self): + missing_params = [] + + if not self.module.params.get('email'): + missing_params.append('email') + + if not self.module.params.get('username'): + missing_params.append('username') + + if not self.module.params.get('password'): + missing_params.append('password') + + if not self.module.params.get('first_name'): + missing_params.append('first_name') + + if not self.module.params.get('last_name'): + missing_params.append('last_name') + + if missing_params: + self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params)) + + account = self.get_account() + + if not account: + self.result['changed'] = True + + args = {} + args['account'] = self.module.params.get('name') + args['domainid'] = self.get_domain('id') + args['accounttype'] = self.get_account_type() + args['networkdomain'] = self.module.params.get('network_domain') + args['username'] = self.module.params.get('username') + args['password'] = self.module.params.get('password') + args['firstname'] = self.module.params.get('first_name') + args['lastname'] = self.module.params.get('last_name') + args['email'] = self.module.params.get('email') + args['timezone'] = self.module.params.get('timezone') + if not self.module.check_mode: + res = self.cs.createAccount(**args) + if 
'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + account = res['account'] + return account + + + def absent_account(self): + account = self.get_account() + if account: + self.result['changed'] = True + + if not self.module.check_mode: + res = self.cs.deleteAccount(id=account['id']) + + if 'errortext' in account: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self._poll_job(res, 'account') + return account + + + def get_result(self, account): + if account: + if 'name' in account: + self.result['name'] = account['name'] + if 'accounttype' in account: + for key,value in self.account_types.items(): + if value == account['accounttype']: + self.result['account_type'] = key + break + if 'state' in account: + self.result['account_state'] = account['state'] + if 'domain' in account: + self.result['domain'] = account['domain'] + if 'networkdomain' in account: + self.result['network_domain'] = account['networkdomain'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked' ], default='present'), + account_type = dict(choices=['user', 'root_admin', 'domain_admin'], default='user'), + network_domain = dict(default=None), + domain = dict(default='ROOT'), + email = dict(default=None), + first_name = dict(default=None), + last_name = dict(default=None), + username = dict(default=None), + password = dict(default=None), + timezone = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), + 
supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_acc = AnsibleCloudStackAccount(module) + + state = module.params.get('state') + + if state in ['absent']: + account = acs_acc.absent_account() + + elif state in ['enabled']: + account = acs_acc.enable_account() + + elif state in ['disabled']: + account = acs_acc.disable_account() + + elif state in ['locked']: + account = acs_acc.lock_account() + + else: + account = acs_acc.present_account() + + result = acs_acc.get_result(account) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py new file mode 100644 index 00000000000..580cc5d7e8d --- /dev/null +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_affinitygroup +short_description: Manages affinity groups on Apache CloudStack based clouds. +description: + - Create and remove affinity groups. 
+version_added: '2.0' +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the affinity group. + required: true + affinty_type: + description: + - Type of the affinity group. If not specified, first found affinity type is used. + required: false + default: null + description: + description: + - Description of the affinity group. + required: false + default: null + state: + description: + - State of the affinity group. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + domain: + description: + - Domain the affinity group is related to. + required: false + default: null + account: + description: + - Account the affinity group is related to. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Create a affinity group +- local_action: + module: cs_affinitygroup + name: haproxy + affinty_type: host anti-affinity + +# Remove a affinity group +- local_action: + module: cs_affinitygroup + name: haproxy + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of affinity group. + returned: success + type: string + sample: app +description: + description: Description of affinity group. + returned: success + type: string + sample: application affinity group +affinity_type: + description: Type of affinity group. 
+ returned: success + type: string + sample: host anti-affinity +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.affinity_group = None + + + def get_affinity_group(self): + if not self.affinity_group: + affinity_group = self.module.params.get('name') + + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + + affinity_groups = self.cs.listAffinityGroups(**args) + if affinity_groups: + for a in affinity_groups['affinitygroup']: + if affinity_group in [ a['name'], a['id'] ]: + self.affinity_group = a + break + return self.affinity_group + + + def get_affinity_type(self): + affinity_type = self.module.params.get('affinty_type') + + affinity_types = self.cs.listAffinityGroupTypes() + if affinity_types: + if not affinity_type: + return affinity_types['affinityGroupType'][0]['type'] + + for a in affinity_types['affinityGroupType']: + if a['type'] == affinity_type: + return a['type'] + self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type) + + + def create_affinity_group(self): + affinity_group = self.get_affinity_group() + if not affinity_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['type'] = self.get_affinity_type() + args['description'] = self.module.params.get('description') + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + + if not self.module.check_mode: + res = self.cs.createAffinityGroup(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + affinity_group = 
self._poll_job(res, 'affinitygroup') + return affinity_group + + + def remove_affinity_group(self): + affinity_group = self.get_affinity_group() + if affinity_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + + if not self.module.check_mode: + res = self.cs.deleteAffinityGroup(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'affinitygroup') + return affinity_group + + + def get_result(self, affinity_group): + if affinity_group: + if 'name' in affinity_group: + self.result['name'] = affinity_group['name'] + if 'description' in affinity_group: + self.result['description'] = affinity_group['description'] + if 'type' in affinity_group: + self.result['affinity_type'] = affinity_group['type'] + if 'domain' in affinity_group: + self.result['domain'] = affinity_group['domain'] + if 'account' in affinity_group: + self.result['account'] = affinity_group['account'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + affinty_type = dict(default=None), + description = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + domain = dict(default=None), + account = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_ag = 
AnsibleCloudStackAffinityGroup(module) + + state = module.params.get('state') + if state in ['absent']: + affinity_group = acs_ag.remove_affinity_group() + else: + affinity_group = acs_ag.create_affinity_group() + + result = acs_ag.get_result(affinity_group) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_facts.py b/cloud/cloudstack/cs_facts.py new file mode 100644 index 00000000000..f8749834120 --- /dev/null +++ b/cloud/cloudstack/cs_facts.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_facts +short_description: Gather facts on instances of Apache CloudStack based clouds. +description: + - This module fetches data from the metadata API in CloudStack. The module must be called from within the instance itself. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + filter: + description: + - Filter for a specific fact. 
+ required: false + default: null + choices: + - cloudstack_service_offering + - cloudstack_availability_zone + - cloudstack_public_hostname + - cloudstack_public_ipv4 + - cloudstack_local_hostname + - cloudstack_local_ipv4 + - cloudstack_instance_id + - cloudstack_user_data +requirements: [ 'yaml' ] +''' + +EXAMPLES = ''' +# Gather all facts on instances +- name: Gather cloudstack facts + cs_facts: + +# Gather specific fact on instances +- name: Gather cloudstack facts + cs_facts: filter=cloudstack_instance_id +''' + +RETURN = ''' +--- +cloudstack_availability_zone: + description: zone the instance is deployed in. + returned: success + type: string + sample: ch-gva-2 +cloudstack_instance_id: + description: UUID of the instance. + returned: success + type: string + sample: ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 +cloudstack_local_hostname: + description: local hostname of the instance. + returned: success + type: string + sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 +cloudstack_local_ipv4: + description: local IPv4 of the instance. + returned: success + type: string + sample: 185.19.28.35 +cloudstack_public_hostname: + description: public hostname of the instance. + returned: success + type: string + sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 +cloudstack_public_ipv4: + description: public IPv4 of the instance. + returned: success + type: string + sample: 185.19.28.35 +cloudstack_service_offering: + description: service offering of the instance. + returned: success + type: string + sample: Micro 512mb 1cpu +cloudstack_user_data: + description: data of the instance provided by users. 
import os

try:
    import yaml
    has_lib_yaml = True
except ImportError:
    has_lib_yaml = False

# Metadata/user-data endpoints served by the CloudStack virtual router (DHCP server IP).
CS_METADATA_BASE_URL = "http://%s/latest/meta-data"
CS_USERDATA_BASE_URL = "http://%s/latest/user-data"


class CloudStackFacts(object):
    """Gather CloudStack instance facts from the metadata service.

    Must run on the instance itself: the metadata API IP is discovered
    from the local dhclient lease file of the default interface.
    """

    def __init__(self):
        # Base ansible facts are needed to find the default IPv4 interface.
        self.facts = ansible_facts(module)
        self.api_ip = None  # lazily discovered via _get_api_ip()
        self.fact_paths = {
            'cloudstack_service_offering': 'service-offering',
            'cloudstack_availability_zone': 'availability-zone',
            'cloudstack_public_hostname': 'public-hostname',
            'cloudstack_public_ipv4': 'public-ipv4',
            'cloudstack_local_hostname': 'local-hostname',
            'cloudstack_local_ipv4': 'local-ipv4',
            'cloudstack_instance_id': 'instance-id'
        }

    def run(self):
        """Fetch all facts, or only the one selected by the 'filter' param."""
        result = {}
        # Renamed local (was 'filter') to avoid shadowing the builtin.
        fact_filter = module.params.get('filter')
        if not fact_filter:
            for key, path in self.fact_paths.iteritems():
                result[key] = self._fetch(CS_METADATA_BASE_URL + "/" + path)
            result['cloudstack_user_data'] = self._get_user_data_json()
        else:
            if fact_filter == 'cloudstack_user_data':
                result['cloudstack_user_data'] = self._get_user_data_json()
            elif fact_filter in self.fact_paths:
                result[fact_filter] = self._fetch(CS_METADATA_BASE_URL + "/" + self.fact_paths[fact_filter])
        return result

    def _get_user_data_json(self):
        """Best-effort parse of user data; returns None if it cannot be parsed."""
        try:
            # This data comes from users; we try what we can to parse it.
            # SECURITY NOTE(review): yaml.load on untrusted user data can
            # construct arbitrary Python objects; yaml.safe_load would be the
            # safer choice here - kept as-is to preserve current behaviour.
            return yaml.load(self._fetch(CS_USERDATA_BASE_URL))
        except Exception:
            # FIX: narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt are no longer swallowed. Parse errors are
            # deliberately ignored: user data is free-form.
            return None

    def _fetch(self, path):
        """GET a metadata URL built from 'path'; return the body or None."""
        api_ip = self._get_api_ip()
        if not api_ip:
            return None
        api_url = path % api_ip
        (response, info) = fetch_url(module, api_url, force=True)
        if response:
            data = response.read()
        else:
            data = None
        return data

    def _get_dhcp_lease_file(self):
        """Return the path of the dhclient lease file for the default interface."""
        default_iface = self.facts['default_ipv4']['interface']
        dhcp_lease_file_locations = [
            '/var/lib/dhcp/dhclient.%s.leases' % default_iface,      # debian / ubuntu
            '/var/lib/dhclient/dhclient-%s.leases' % default_iface,  # centos 6
            '/var/lib/dhclient/dhclient--%s.lease' % default_iface,  # centos 7
            '/var/db/dhclient.leases.%s' % default_iface,            # openbsd
        ]
        for file_path in dhcp_lease_file_locations:
            if os.path.exists(file_path):
                return file_path
        module.fail_json(msg="Could not find dhclient leases file.")

    def _get_api_ip(self):
        """Return the IP of the DHCP server (which serves the metadata API), cached."""
        if not self.api_ip:
            dhcp_lease_file = self._get_dhcp_lease_file()
            for line in open(dhcp_lease_file):
                if 'dhcp-server-identifier' in line:
                    # extract IP from e.g. "option dhcp-server-identifier 185.19.28.176;"
                    line = line.translate(None, ';')
                    self.api_ip = line.split()[2]
                    break
            if not self.api_ip:
                module.fail_json(msg="No dhcp-server-identifier found in leases file.")
        return self.api_ip


def main():
    """Module entry point: gather facts and return them as ansible_facts."""
    global module
    module = AnsibleModule(
        argument_spec=dict(
            filter=dict(default=None, choices=[
                'cloudstack_service_offering',
                'cloudstack_availability_zone',
                'cloudstack_public_hostname',
                'cloudstack_public_ipv4',
                'cloudstack_local_hostname',
                'cloudstack_local_ipv4',
                'cloudstack_instance_id',
                'cloudstack_user_data',
            ]),
        ),
        supports_check_mode=False
    )

    if not has_lib_yaml:
        module.fail_json(msg="missing python library: yaml")

    cs_facts = CloudStackFacts().run()
    cs_facts_result = dict(changed=False, ansible_facts=cs_facts)
    module.exit_json(**cs_facts_result)

from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.facts import *
main()
+ - C(all) is only available if C(type=egress) + required: false + default: 'tcp' + choices: [ 'tcp', 'udp', 'icmp', 'all' ] + cidr: + description: + - CIDR (full notation) to be used for firewall rule. + required: false + default: '0.0.0.0/0' + start_port: + description: + - Start port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). + required: false + default: null + aliases: [ 'port' ] + end_port: + description: + - End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). If not specified, equal C(start_port). + required: false + default: null + icmp_type: + description: + - Type of the icmp message being sent. Considered if C(protocol=icmp). + required: false + default: null + icmp_code: + description: + - Error code for this icmp message. Considered if C(protocol=icmp). + required: false + default: null + domain: + description: + - Domain the firewall rule is related to. + required: false + default: null + account: + description: + - Account the firewall rule is related to. + required: false + default: null + project: + description: + - Name of the project the firewall rule is related to. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. 
+ required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1 +- local_action: + module: cs_firewall + ip_address: 4.3.2.1 + port: 80 + cidr: 1.2.3.4/32 + +# Allow inbound tcp/udp port 53 to 4.3.2.1 +- local_action: + module: cs_firewall + ip_address: 4.3.2.1 + port: 53 + protocol: '{{ item }}' + with_items: + - tcp + - udp + +# Ensure firewall rule is removed +- local_action: + module: cs_firewall + ip_address: 4.3.2.1 + start_port: 8000 + end_port: 8888 + cidr: 17.0.0.0/8 + state: absent + +# Allow all outbound traffic +- local_action: + module: cs_firewall + network: my_network + type: egress + protocol: all + +# Allow only HTTP outbound traffic for an IP +- local_action: + module: cs_firewall + network: my_network + type: egress + port: 80 + cidr: 10.101.1.20 +''' + +RETURN = ''' +--- +ip_address: + description: IP address of the rule if C(type=ingress) + returned: success + type: string + sample: 10.100.212.10 +type: + description: Type of the rule. + returned: success + type: string + sample: ingress +cidr: + description: CIDR of the rule. + returned: success + type: string + sample: 0.0.0.0/0 +protocol: + description: Protocol of the rule. + returned: success + type: string + sample: tcp +start_port: + description: Start port of the rule. + returned: success + type: int + sample: 80 +end_port: + description: End port of the rule. + returned: success + type: int + sample: 80 +icmp_code: + description: ICMP code of the rule. + returned: success + type: int + sample: 1 +icmp_type: + description: ICMP type of the rule. 
+ returned: success + type: int + sample: 1 +network: + description: Name of the network if C(type=egress) + returned: success + type: string + sample: my_network +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackFirewall(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.firewall_rule = None + + + def get_end_port(self): + if self.module.params.get('end_port'): + return self.module.params.get('end_port') + return self.module.params.get('start_port') + + + def get_firewall_rule(self): + if not self.firewall_rule: + cidr = self.module.params.get('cidr') + protocol = self.module.params.get('protocol') + start_port = self.module.params.get('start_port') + end_port = self.get_end_port() + icmp_code = self.module.params.get('icmp_code') + icmp_type = self.module.params.get('icmp_type') + fw_type = self.module.params.get('type') + + if protocol in ['tcp', 'udp'] and not (start_port and end_port): + self.module.fail_json(msg="missing required argument for protocol '%s': start_port or end_port" % protocol) + + if protocol == 'icmp' and not icmp_type: + self.module.fail_json(msg="missing required argument for protocol 'icmp': icmp_type") + + if protocol == 'all' and fw_type != 'egress': + self.module.fail_json(msg="protocol 'all' could only be used for type 'egress'" ) + + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + + if fw_type == 'egress': + args['networkid'] = self.get_network(key='id') + if not args['networkid']: + self.module.fail_json(msg="missing required argument for type egress: network") + firewall_rules = self.cs.listEgressFirewallRules(**args) + else: + args['ipaddressid'] = self.get_ip_address('id') + if not 
args['ipaddressid']: + self.module.fail_json(msg="missing required argument for type ingress: ip_address") + firewall_rules = self.cs.listFirewallRules(**args) + + if firewall_rules and 'firewallrule' in firewall_rules: + for rule in firewall_rules['firewallrule']: + type_match = self._type_cidr_match(rule, cidr) + + protocol_match = self._tcp_udp_match(rule, protocol, start_port, end_port) \ + or self._icmp_match(rule, protocol, icmp_code, icmp_type) \ + or self._egress_all_match(rule, protocol, fw_type) + + if type_match and protocol_match: + self.firewall_rule = rule + break + return self.firewall_rule + + + def _tcp_udp_match(self, rule, protocol, start_port, end_port): + return protocol in ['tcp', 'udp'] \ + and protocol == rule['protocol'] \ + and start_port == int(rule['startport']) \ + and end_port == int(rule['endport']) + + + def _egress_all_match(self, rule, protocol, fw_type): + return protocol in ['all'] \ + and protocol == rule['protocol'] \ + and fw_type == 'egress' + + + def _icmp_match(self, rule, protocol, icmp_code, icmp_type): + return protocol == 'icmp' \ + and protocol == rule['protocol'] \ + and icmp_code == rule['icmpcode'] \ + and icmp_type == rule['icmptype'] + + + def _type_cidr_match(self, rule, cidr): + return cidr == rule['cidrlist'] + + + def get_network(self, key=None, network=None): + if not network: + network = self.module.params.get('network') + + if not network: + return None + + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') + + networks = self.cs.listNetworks(**args) + if not networks: + self.module.fail_json(msg="No networks available") + + for n in networks['network']: + if network in [ n['displaytext'], n['name'], n['id'] ]: + return self._get_by_key(key, n) + break + self.module.fail_json(msg="Network '%s' not found" % network) + + + def create_firewall_rule(self): + firewall_rule = 
self.get_firewall_rule() + if not firewall_rule: + self.result['changed'] = True + + args = {} + args['cidrlist'] = self.module.params.get('cidr') + args['protocol'] = self.module.params.get('protocol') + args['startport'] = self.module.params.get('start_port') + args['endport'] = self.get_end_port() + args['icmptype'] = self.module.params.get('icmp_type') + args['icmpcode'] = self.module.params.get('icmp_code') + + fw_type = self.module.params.get('type') + if not self.module.check_mode: + if fw_type == 'egress': + args['networkid'] = self.get_network(key='id') + res = self.cs.createEgressFirewallRule(**args) + else: + args['ipaddressid'] = self.get_ip_address('id') + res = self.cs.createFirewallRule(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + firewall_rule = self._poll_job(res, 'firewallrule') + return firewall_rule + + + def remove_firewall_rule(self): + firewall_rule = self.get_firewall_rule() + if firewall_rule: + self.result['changed'] = True + + args = {} + args['id'] = firewall_rule['id'] + + fw_type = self.module.params.get('type') + if not self.module.check_mode: + if fw_type == 'egress': + res = self.cs.deleteEgressFirewallRule(**args) + else: + res = self.cs.deleteFirewallRule(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self._poll_job(res, 'firewallrule') + return firewall_rule + + + def get_result(self, firewall_rule): + if firewall_rule: + self.result['type'] = self.module.params.get('type') + if 'cidrlist' in firewall_rule: + self.result['cidr'] = firewall_rule['cidrlist'] + if 'startport' in firewall_rule: + self.result['start_port'] = int(firewall_rule['startport']) + if 'endport' in firewall_rule: + self.result['end_port'] = int(firewall_rule['endport']) + if 'protocol' in firewall_rule: + 
self.result['protocol'] = firewall_rule['protocol'] + if 'ipaddress' in firewall_rule: + self.result['ip_address'] = firewall_rule['ipaddress'] + if 'icmpcode' in firewall_rule: + self.result['icmp_code'] = int(firewall_rule['icmpcode']) + if 'icmptype' in firewall_rule: + self.result['icmp_type'] = int(firewall_rule['icmptype']) + if 'networkid' in firewall_rule: + self.result['network'] = self.get_network(key='displaytext', network=firewall_rule['networkid']) + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + ip_address = dict(default=None), + network = dict(default=None), + cidr = dict(default='0.0.0.0/0'), + protocol = dict(choices=['tcp', 'udp', 'icmp', 'all'], default='tcp'), + type = dict(choices=['ingress', 'egress'], default='ingress'), + icmp_type = dict(type='int', default=None), + icmp_code = dict(type='int', default=None), + start_port = dict(type='int', aliases=['port'], default=None), + end_port = dict(type='int', default=None), + state = dict(choices=['present', 'absent'], default='present'), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_one_of = ( + ['ip_address', 'network'], + ), + required_together = ( + ['icmp_type', 'icmp_code'], + ['api_key', 'api_secret', 'api_url'], + ), + mutually_exclusive = ( + ['icmp_type', 'start_port'], + ['icmp_type', 'end_port'], + ['ip_address', 'network'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_fw = AnsibleCloudStackFirewall(module) + + state = module.params.get('state') + if state in ['absent']: + fw_rule = acs_fw.remove_firewall_rule() + 
else: + fw_rule = acs_fw.create_firewall_rule() + + result = acs_fw.get_result(fw_rule) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py new file mode 100644 index 00000000000..0d156390e83 --- /dev/null +++ b/cloud/cloudstack/cs_instance.py @@ -0,0 +1,865 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_instance +short_description: Manages instances and virtual machines on Apache CloudStack based clouds. +description: + - Deploy, start, restart, stop and destroy instances. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + name: + description: + - Host name of the instance. C(name) can only contain ASCII letters. + required: true + display_name: + description: + - Custom display name of the instances. + required: false + default: null + group: + description: + - Group in where the new instance should be in. + required: false + default: null + state: + description: + - State of the instance. 
+ required: false + default: 'present' + choices: [ 'deployed', 'started', 'stopped', 'restarted', 'destroyed', 'expunged', 'present', 'absent' ] + service_offering: + description: + - Name or id of the service offering of the new instance. + - If not set, first found service offering is used. + required: false + default: null + template: + description: + - Name or id of the template to be used for creating the new instance. + - Required when using C(state=present). + - Mutually exclusive with C(ISO) option. + required: false + default: null + iso: + description: + - Name or id of the ISO to be used for creating the new instance. + - Required when using C(state=present). + - Mutually exclusive with C(template) option. + required: false + default: null + hypervisor: + description: + - Name the hypervisor to be used for creating the new instance. + - Relevant when using C(state=present), but only considered if not set on ISO/template. + - If not set or found on ISO/template, first found hypervisor will be used. + required: false + default: null + choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] + keyboard: + description: + - Keyboard device type for the instance. + required: false + default: null + choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ] + networks: + description: + - List of networks to use for the new instance. + required: false + default: [] + aliases: [ 'network' ] + ip_address: + description: + - IPv4 address for default instance's network during creation. + required: false + default: null + ip6_address: + description: + - IPv6 address for default instance's network. + required: false + default: null + disk_offering: + description: + - Name of the disk offering to be used. + required: false + default: null + disk_size: + description: + - Disk size in GByte required if deploying instance from ISO. 
+ required: false + default: null + security_groups: + description: + - List of security groups the instance to be applied to. + required: false + default: [] + aliases: [ 'security_group' ] + domain: + description: + - Domain the instance is related to. + required: false + default: null + account: + description: + - Account the instance is related to. + required: false + default: null + project: + description: + - Name of the project the instance to be deployed in. + required: false + default: null + zone: + description: + - Name of the zone in which the instance shoud be deployed. + - If not set, default zone is used. + required: false + default: null + ssh_key: + description: + - Name of the SSH key to be deployed on the new instance. + required: false + default: null + affinity_groups: + description: + - Affinity groups names to be applied to the new instance. + required: false + default: [] + aliases: [ 'affinity_group' ] + user_data: + description: + - Optional data (ASCII) that can be sent to the instance upon a successful deployment. + - The data will be automatically base64 encoded. + - Consider switching to HTTP_POST by using C(CLOUDSTACK_METHOD=post) to increase the HTTP_GET size limit of 2KB to 32 KB. + required: false + default: null + force: + description: + - Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed. + required: false + default: false + tags: + description: + - List of tags. Tags are a list of dictionaries having keys C(key) and C(value). + - "If you want to delete all tags, set a empty list e.g. C(tags: [])." + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Create a instance from an ISO +# NOTE: Names of offerings and ISOs depending on the CloudStack configuration. 
+- local_action: + module: cs_instance + name: web-vm-1 + iso: Linux Debian 7 64-bit + hypervisor: VMware + project: Integration + zone: ch-zrh-ix-01 + service_offering: 1cpu_1gb + disk_offering: PerfPlus Storage + disk_size: 20 + networks: + - Server Integration + - Sync Integration + - Storage Integration + +# For changing a running instance, use the 'force' parameter +- local_action: + module: cs_instance + name: web-vm-1 + display_name: web-vm-01.example.com + iso: Linux Debian 7 64-bit + service_offering: 2cpu_2gb + force: yes + +# Create or update a instance on Exoscale's public cloud +- local_action: + module: cs_instance + name: web-vm-1 + template: Linux Debian 7 64-bit + service_offering: Tiny + ssh_key: john@example.com + tags: + - { key: admin, value: john } + - { key: foo, value: bar } + +# Ensure a instance has stopped +- local_action: cs_instance name=web-vm-1 state=stopped + +# Ensure a instance is running +- local_action: cs_instance name=web-vm-1 state=started + +# Remove a instance +- local_action: cs_instance name=web-vm-1 state=absent +''' + +RETURN = ''' +--- +id: + description: ID of the instance. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the instance. + returned: success + type: string + sample: web-01 +display_name: + description: Display name of the instance. + returned: success + type: string + sample: web-01 +group: + description: Group name of the instance is related. + returned: success + type: string + sample: web +created: + description: Date of the instance was created. + returned: success + type: string + sample: 2014-12-01T14:57:57+0100 +password_enabled: + description: True if password setting is enabled. + returned: success + type: boolean + sample: true +password: + description: The password of the instance if exists. + returned: success + type: string + sample: Ge2oe7Do +ssh_key: + description: Name of SSH key deployed to instance. 
+ returned: success + type: string + sample: key@work +domain: + description: Domain the instance is related to. + returned: success + type: string + sample: example domain +account: + description: Account the instance is related to. + returned: success + type: string + sample: example account +project: + description: Name of project the instance is related to. + returned: success + type: string + sample: Production +default_ip: + description: Default IP address of the instance. + returned: success + type: string + sample: 10.23.37.42 +public_ip: + description: Public IP address with instance via static NAT rule. + returned: success + type: string + sample: 1.2.3.4 +iso: + description: Name of ISO the instance was deployed with. + returned: success + type: string + sample: Debian-8-64bit +template: + description: Name of template the instance was deployed with. + returned: success + type: string + sample: Debian-8-64bit +service_offering: + description: Name of the service offering the instance has. + returned: success + type: string + sample: 2cpu_2gb +zone: + description: Name of zone the instance is in. + returned: success + type: string + sample: ch-gva-2 +state: + description: State of the instance. + returned: success + type: string + sample: Running +security_groups: + description: Security groups the instance is in. + returned: success + type: list + sample: '[ "default" ]' +affinity_groups: + description: Affinity groups the instance is in. + returned: success + type: list + sample: '[ "webservers" ]' +tags: + description: List of resource tags associated with the instance. + returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +hypervisor: + description: Hypervisor related to this instance. + returned: success + type: string + sample: KVM +instance_name: + description: Internal name of the instance (ROOT admin only). 
+ returned: success + type: string + sample: i-44-3992-VM +''' + +import base64 + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackInstance(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.instance = None + self.template = None + self.iso = None + + + def get_service_offering_id(self): + service_offering = self.module.params.get('service_offering') + + service_offerings = self.cs.listServiceOfferings() + if service_offerings: + if not service_offering: + return service_offerings['serviceoffering'][0]['id'] + + for s in service_offerings['serviceoffering']: + if service_offering in [ s['name'], s['id'] ]: + return s['id'] + self.module.fail_json(msg="Service offering '%s' not found" % service_offering) + + + def get_template_or_iso(self, key=None): + template = self.module.params.get('template') + iso = self.module.params.get('iso') + + if not template and not iso: + self.module.fail_json(msg="Template or ISO is required.") + + if template and iso: + self.module.fail_json(msg="Template are ISO are mutually exclusive.") + + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') + + if template: + if self.template: + return self._get_by_key(key, self.template) + + args['templatefilter'] = 'executable' + templates = self.cs.listTemplates(**args) + if templates: + for t in templates['template']: + if template in [ t['displaytext'], t['name'], t['id'] ]: + self.template = t + return self._get_by_key(key, self.template) + self.module.fail_json(msg="Template '%s' not found" % template) + + elif iso: + if self.iso: + return self._get_by_key(key, self.iso) + args['isofilter'] = 'executable' + isos = 
self.cs.listIsos(**args) + if isos: + for i in isos['iso']: + if iso in [ i['displaytext'], i['name'], i['id'] ]: + self.iso = i + return self._get_by_key(key, self.iso) + self.module.fail_json(msg="ISO '%s' not found" % iso) + + + def get_disk_offering_id(self): + disk_offering = self.module.params.get('disk_offering') + + if not disk_offering: + return None + + args = {} + args['domainid'] = self.get_domain('id') + + disk_offerings = self.cs.listDiskOfferings(**args) + if disk_offerings: + for d in disk_offerings['diskoffering']: + if disk_offering in [ d['displaytext'], d['name'], d['id'] ]: + return d['id'] + self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering) + + + def get_instance(self): + instance = self.instance + if not instance: + instance_name = self.module.params.get('name') + + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') + + instances = self.cs.listVirtualMachines(**args) + if instances: + for v in instances['virtualmachine']: + if instance_name in [ v['name'], v['displayname'], v['id'] ]: + self.instance = v + break + return self.instance + + + def get_network_ids(self): + network_names = self.module.params.get('networks') + if not network_names: + return None + + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') + + networks = self.cs.listNetworks(**args) + if not networks: + self.module.fail_json(msg="No networks available") + + network_ids = [] + network_displaytexts = [] + for network_name in network_names: + for n in networks['network']: + if network_name in [ n['displaytext'], n['name'], n['id'] ]: + network_ids.append(n['id']) + network_displaytexts.append(n['name']) + break + + if len(network_ids) != len(network_names): + self.module.fail_json(msg="Could 
not find all networks, networks list found: %s" % network_displaytexts) + + return ','.join(network_ids) + + + def present_instance(self): + instance = self.get_instance() + if not instance: + instance = self.deploy_instance() + else: + instance = self.update_instance(instance) + + instance = self.ensure_tags(resource=instance, resource_type='UserVm') + + return instance + + + def get_user_data(self): + user_data = self.module.params.get('user_data') + if user_data: + user_data = base64.b64encode(user_data) + return user_data + + + def get_display_name(self): + display_name = self.module.params.get('display_name') + if not display_name: + display_name = self.module.params.get('name') + return display_name + + + def deploy_instance(self): + self.result['changed'] = True + + args = {} + args['templateid'] = self.get_template_or_iso(key='id') + args['zoneid'] = self.get_zone('id') + args['serviceofferingid'] = self.get_service_offering_id() + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + args['diskofferingid'] = self.get_disk_offering_id() + args['networkids'] = self.get_network_ids() + args['userdata'] = self.get_user_data() + args['keyboard'] = self.module.params.get('keyboard') + args['ipaddress'] = self.module.params.get('ip_address') + args['ip6address'] = self.module.params.get('ip6_address') + args['name'] = self.module.params.get('name') + args['group'] = self.module.params.get('group') + args['keypair'] = self.module.params.get('ssh_key') + args['size'] = self.module.params.get('disk_size') + args['securitygroupnames'] = ','.join(self.module.params.get('security_groups')) + args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups')) + + template_iso = self.get_template_or_iso() + if 'hypervisor' not in template_iso: + args['hypervisor'] = self.get_hypervisor() + + instance = None + if not self.module.check_mode: + instance = 
self.cs.deployVirtualMachine(**args) + + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self._poll_job(instance, 'virtualmachine') + return instance + + + def update_instance(self, instance): + args_service_offering = {} + args_service_offering['id'] = instance['id'] + args_service_offering['serviceofferingid'] = self.get_service_offering_id() + + args_instance_update = {} + args_instance_update['id'] = instance['id'] + args_instance_update['group'] = self.module.params.get('group') + args_instance_update['displayname'] = self.get_display_name() + args_instance_update['userdata'] = self.get_user_data() + args_instance_update['ostypeid'] = self.get_os_type('id') + + args_ssh_key = {} + args_ssh_key['id'] = instance['id'] + args_ssh_key['keypair'] = self.module.params.get('ssh_key') + args_ssh_key['projectid'] = self.get_project('id') + + if self._has_changed(args_service_offering, instance) or \ + self._has_changed(args_instance_update, instance) or \ + self._has_changed(args_ssh_key, instance): + + force = self.module.params.get('force') + instance_state = instance['state'].lower() + + if instance_state == 'stopped' or force: + self.result['changed'] = True + if not self.module.check_mode: + + # Ensure VM has stopped + instance = self.stop_instance() + instance = self._poll_job(instance, 'virtualmachine') + self.instance = instance + + # Change service offering + if self._has_changed(args_service_offering, instance): + res = self.cs.changeServiceForVirtualMachine(**args_service_offering) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + instance = res['virtualmachine'] + self.instance = instance + + # Update VM + if self._has_changed(args_instance_update, instance): + res = self.cs.updateVirtualMachine(**args_instance_update) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % 
res['errortext']) + instance = res['virtualmachine'] + self.instance = instance + + # Reset SSH key + if self._has_changed(args_ssh_key, instance): + instance = self.cs.resetSSHKeyForVirtualMachine(**args_ssh_key) + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) + + instance = self._poll_job(instance, 'virtualmachine') + self.instance = instance + + # Start VM again if it was running before + if instance_state == 'running': + instance = self.start_instance() + return instance + + + def absent_instance(self): + instance = self.get_instance() + if instance: + if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.destroyVirtualMachine(id=instance['id']) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self._poll_job(res, 'virtualmachine') + return instance + + + def expunge_instance(self): + instance = self.get_instance() + if instance: + res = {} + if instance['state'].lower() in [ 'destroying', 'destroyed' ]: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.expungeVirtualMachine(id=instance['id']) + + elif instance['state'].lower() not in [ 'expunging' ]: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True) + + if res and 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self._poll_job(res, 'virtualmachine') + return instance + + + def stop_instance(self): + instance = self.get_instance() + if not instance: + self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name')) + + if instance['state'].lower() in ['stopping', 'stopped']: + 
return instance
+
+        if instance['state'].lower() in ['starting', 'running']:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                instance = self.cs.stopVirtualMachine(id=instance['id'])
+
+                if 'errortext' in instance:
+                    self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    instance = self._poll_job(instance, 'virtualmachine')
+        return instance
+
+
+    def start_instance(self):
+        instance = self.get_instance()
+        if not instance:
+            self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name'))
+
+        if instance['state'].lower() in ['starting', 'running']:
+            return instance
+
+        if instance['state'].lower() in ['stopped', 'stopping']:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                instance = self.cs.startVirtualMachine(id=instance['id'])
+
+                if 'errortext' in instance:
+                    self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    instance = self._poll_job(instance, 'virtualmachine')
+        return instance
+
+
+    def restart_instance(self):
+        instance = self.get_instance()
+        if not instance:
+            self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name'))
+
+        if instance['state'].lower() in [ 'running', 'starting' ]:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                instance = self.cs.rebootVirtualMachine(id=instance['id'])
+
+                if 'errortext' in instance:
+                    self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    instance = self._poll_job(instance, 'virtualmachine')
+
+        elif instance['state'].lower() in [ 'stopping', 'stopped' ]:
+            instance = self.start_instance()
+        return instance
+
+
+    def get_result(self, instance):
+        if instance:
+            if 'id' in instance:
+                self.result['id'] = instance['id']
+            if 'name' in instance:
+                self.result['name'] =
instance['name']
+            if 'displayname' in instance:
+                self.result['display_name'] = instance['displayname']
+            if 'group' in instance:
+                self.result['group'] = instance['group']
+            if 'domain' in instance:
+                self.result['domain'] = instance['domain']
+            if 'account' in instance:
+                self.result['account'] = instance['account']
+            if 'project' in instance:
+                self.result['project'] = instance['project']
+            if 'hypervisor' in instance:
+                self.result['hypervisor'] = instance['hypervisor']
+            if 'instancename' in instance:
+                self.result['instance_name'] = instance['instancename']
+            if 'publicip' in instance:
+                self.result['public_ip'] = instance['publicip']
+            if 'passwordenabled' in instance:
+                self.result['password_enabled'] = instance['passwordenabled']
+            if 'password' in instance:
+                self.result['password'] = instance['password']
+            if 'serviceofferingname' in instance:
+                self.result['service_offering'] = instance['serviceofferingname']
+            if 'zonename' in instance:
+                self.result['zone'] = instance['zonename']
+            if 'templatename' in instance:
+                self.result['template'] = instance['templatename']
+            if 'isoname' in instance:
+                self.result['iso'] = instance['isoname']
+            if 'keypair' in instance:
+                self.result['ssh_key'] = instance['keypair']
+            if 'created' in instance:
+                self.result['created'] = instance['created']
+            if 'state' in instance:
+                self.result['state'] = instance['state']
+            if 'tags' in instance:
+                self.result['tags'] = []
+                for tag in instance['tags']:
+                    result_tag = {}
+                    result_tag['key'] = tag['key']
+                    result_tag['value'] = tag['value']
+                    self.result['tags'].append(result_tag)
+            if 'securitygroup' in instance:
+                security_groups = []
+                for securitygroup in instance['securitygroup']:
+                    security_groups.append(securitygroup['name'])
+                self.result['security_groups'] = security_groups
+            if 'affinitygroup' in instance:
+                affinity_groups = []
+                for affinitygroup in instance['affinitygroup']:
+                    affinity_groups.append(affinitygroup['name'])
+                self.result['affinity_groups'] =
affinity_groups
+            if 'nic' in instance:
+                for nic in instance['nic']:
+                    if nic['isdefault']:
+                        self.result['default_ip'] = nic['ipaddress']
+        return self.result
+
+def main():
+    module = AnsibleModule(
+        argument_spec = dict(
+            name = dict(required=True),
+            display_name = dict(default=None),
+            group = dict(default=None),
+            state = dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'absent', 'destroyed', 'expunged'], default='present'),
+            service_offering = dict(default=None),
+            template = dict(default=None),
+            iso = dict(default=None),
+            networks = dict(type='list', aliases=[ 'network' ], default=None),
+            ip_address = dict(default=None),
+            ip6_address = dict(default=None),
+            disk_offering = dict(default=None),
+            disk_size = dict(type='int', default=None),
+            keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None),
+            hypervisor = dict(choices=['KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM'], default=None),
+            security_groups = dict(type='list', aliases=[ 'security_group' ], default=[]),
+            affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]),
+            domain = dict(default=None),
+            account = dict(default=None),
+            project = dict(default=None),
+            user_data = dict(default=None),
+            zone = dict(default=None),
+            ssh_key = dict(default=None),
+            force = dict(choices=BOOLEANS, default=False),
+            tags = dict(type='list', aliases=[ 'tag' ], default=None),
+            poll_async = dict(choices=BOOLEANS, default=True),
+            api_key = dict(default=None),
+            api_secret = dict(default=None, no_log=True),
+            api_url = dict(default=None),
+            api_http_method = dict(choices=['get', 'post'], default='get'),
+            api_timeout = dict(type='int', default=10),
+        ),
+        required_together = (
+            ['api_key', 'api_secret', 'api_url'],
+        ),
+        supports_check_mode=True
+    )
+
+    if not has_lib_cs:
+        module.fail_json(msg="python library cs required: pip install cs")
+
+    try:
+
acs_instance = AnsibleCloudStackInstance(module) + + state = module.params.get('state') + + if state in ['absent', 'destroyed']: + instance = acs_instance.absent_instance() + + elif state in ['expunged']: + instance = acs_instance.expunge_instance() + + elif state in ['present', 'deployed']: + instance = acs_instance.present_instance() + + elif state in ['stopped']: + instance = acs_instance.stop_instance() + + elif state in ['started']: + instance = acs_instance.start_instance() + + elif state in ['restarted']: + instance = acs_instance.restart_instance() + + if instance and 'state' in instance and instance['state'].lower() == 'error': + module.fail_json(msg="Instance named '%s' in error state." % module.params.get('name')) + + result = acs_instance.get_result(instance) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py new file mode 100644 index 00000000000..478748aeec3 --- /dev/null +++ b/cloud/cloudstack/cs_instancegroup.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: cs_instancegroup +short_description: Manages instance groups on Apache CloudStack based clouds. +description: + - Create and remove instance groups. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the instance group. + required: true + domain: + description: + - Domain the instance group is related to. + required: false + default: null + account: + description: + - Account the instance group is related to. + required: false + default: null + project: + description: + - Project the instance group is related to. + required: false + default: null + state: + description: + - State of the instance group. + required: false + default: 'present' + choices: [ 'present', 'absent' ] +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Create an instance group +- local_action: + module: cs_instancegroup + name: loadbalancers + +# Remove an instance group +- local_action: + module: cs_instancegroup + name: loadbalancers + state: absent +''' + +RETURN = ''' +--- +id: + description: ID of the instance group. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the instance group. + returned: success + type: string + sample: webservers +created: + description: Date when the instance group was created. + returned: success + type: string + sample: 2015-05-03T15:05:51+0200 +domain: + description: Domain the instance group is related to. + returned: success + type: string + sample: example domain +account: + description: Account the instance group is related to. + returned: success + type: string + sample: example account +project: + description: Project the instance group is related to. 
+ returned: success + type: string + sample: example project +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackInstanceGroup(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.instance_group = None + + + def get_instance_group(self): + if self.instance_group: + return self.instance_group + + name = self.module.params.get('name') + + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + + instance_groups = self.cs.listInstanceGroups(**args) + if instance_groups: + for g in instance_groups['instancegroup']: + if name in [ g['name'], g['id'] ]: + self.instance_group = g + break + return self.instance_group + + + def present_instance_group(self): + instance_group = self.get_instance_group() + if not instance_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + + if not self.module.check_mode: + res = self.cs.createInstanceGroup(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + instance_group = res['instancegroup'] + return instance_group + + + def absent_instance_group(self): + instance_group = self.get_instance_group() + if instance_group: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.deleteInstanceGroup(id=instance_group['id']) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + return instance_group + + + def get_result(self, instance_group): + if instance_group: + if 'id' in instance_group: + self.result['id'] = instance_group['id'] + if 
'created' in instance_group: + self.result['created'] = instance_group['created'] + if 'name' in instance_group: + self.result['name'] = instance_group['name'] + if 'project' in instance_group: + self.result['project'] = instance_group['project'] + if 'domain' in instance_group: + self.result['domain'] = instance_group['domain'] + if 'account' in instance_group: + self.result['account'] = instance_group['account'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(default='present', choices=['present', 'absent']), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_ig = AnsibleCloudStackInstanceGroup(module) + + state = module.params.get('state') + if state in ['absent']: + instance_group = acs_ig.absent_instance_group() + else: + instance_group = acs_ig.present_instance_group() + + result = acs_ig.get_result(instance_group) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py new file mode 100644 index 00000000000..e3ba322f6ba --- /dev/null +++ b/cloud/cloudstack/cs_iso.py @@ -0,0 +1,364 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: 
you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_iso +short_description: Manages ISOs images on Apache CloudStack based clouds. +description: + - Register and remove ISO images. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the ISO. + required: true + url: + description: + - URL where the ISO can be downloaded from. Required if C(state) is present. + required: false + default: null + os_type: + description: + - Name of the OS that best represents the OS of this ISO. If the iso is bootable this parameter needs to be passed. Required if C(state) is present. + required: false + default: null + is_ready: + description: + - This flag is used for searching existing ISOs. If set to C(true), it will only list ISO ready for deployment e.g. successfully downloaded and installed. Recommended to set it to C(false). + required: false + default: false + aliases: [] + is_public: + description: + - Register the ISO to be publicly available to all users. Only used if C(state) is present. + required: false + default: false + is_featured: + description: + - Register the ISO to be featured. Only used if C(state) is present. + required: false + default: false + is_dynamically_scalable: + description: + - Register the ISO having XS/VMWare tools installed inorder to support dynamic scaling of VM cpu/memory. Only used if C(state) is present. 
+ required: false + default: false + aliases: [] + checksum: + description: + - The MD5 checksum value of this ISO. If set, we search by checksum instead of name. + required: false + default: false + bootable: + description: + - Register the ISO to be bootable. Only used if C(state) is present. + required: false + default: true + domain: + description: + - Domain the ISO is related to. + required: false + default: null + account: + description: + - Account the ISO is related to. + required: false + default: null + project: + description: + - Name of the project the ISO to be registered in. + required: false + default: null + zone: + description: + - Name of the zone you wish the ISO to be registered or deleted from. If not specified, first zone found will be used. + required: false + default: null + iso_filter: + description: + - Name of the filter used to search for the ISO. + required: false + default: 'self' + choices: [ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ] + state: + description: + - State of the ISO. + required: false + default: 'present' + choices: [ 'present', 'absent' ] +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Register an ISO if ISO name does not already exist. +- local_action: + module: cs_iso + name: Debian 7 64-bit + url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso + os_type: Debian GNU/Linux 7(64-bit) + +# Register an ISO with given name if ISO md5 checksum does not already exist. 
+- local_action: + module: cs_iso + name: Debian 7 64-bit + url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso + os_type: Debian GNU/Linux 7(64-bit) + checksum: 0b31bccccb048d20b551f70830bb7ad0 + +# Remove an ISO by name +- local_action: + module: cs_iso + name: Debian 7 64-bit + state: absent + +# Remove an ISO by checksum +- local_action: + module: cs_iso + name: Debian 7 64-bit + checksum: 0b31bccccb048d20b551f70830bb7ad0 + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the ISO. + returned: success + type: string + sample: Debian 7 64-bit +displaytext: + description: Text to be displayed of the ISO. + returned: success + type: string + sample: Debian 7.7 64-bit minimal 2015-03-19 +zone: + description: Name of zone the ISO is registered in. + returned: success + type: string + sample: zuerich +status: + description: Status of the ISO. + returned: success + type: string + sample: Successfully Installed +is_ready: + description: True if the ISO is ready to be deployed from. + returned: success + type: boolean + sample: true +checksum: + description: MD5 checksum of the ISO. + returned: success + type: string + sample: 0b31bccccb048d20b551f70830bb7ad0 +created: + description: Date of registering. + returned: success + type: string + sample: 2015-03-29T14:57:06+0200 +domain: + description: Domain the ISO is related to. + returned: success + type: string + sample: example domain +account: + description: Account the ISO is related to. + returned: success + type: string + sample: example account +project: + description: Project the ISO is related to. 
+  returned: success
+  type: string
+  sample: example project
+'''
+
+try:
+    from cs import CloudStack, CloudStackException, read_config
+    has_lib_cs = True
+except ImportError:
+    has_lib_cs = False
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackIso(AnsibleCloudStack):
+
+    def __init__(self, module):
+        AnsibleCloudStack.__init__(self, module)
+        self.iso = None
+
+    def register_iso(self):
+        iso = self.get_iso()
+        if not iso:
+
+            args = {}
+            args['zoneid'] = self.get_zone('id')
+            args['domainid'] = self.get_domain('id')
+            args['account'] = self.get_account('name')
+            args['projectid'] = self.get_project('id')
+            args['bootable'] = self.module.params.get('bootable')
+            args['ostypeid'] = self.get_os_type('id')
+            args['name'] = self.module.params.get('name')
+            args['displaytext'] = self.module.params.get('name')
+            args['checksum'] = self.module.params.get('checksum')
+            args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable')
+            args['isfeatured'] = self.module.params.get('is_featured')
+            args['ispublic'] = self.module.params.get('is_public')
+
+            if args['bootable'] and not args['ostypeid']:
+                self.module.fail_json(msg="OS type 'os_type' is required if 'bootable=true'.")
+
+            args['url'] = self.module.params.get('url')
+            if not args['url']:
+                self.module.fail_json(msg="URL is required.")
+
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                res = self.cs.registerIso(**args)
+                iso = res['iso'][0]
+        return iso
+
+
+    def get_iso(self):
+        if not self.iso:
+
+            args = {}
+            args['isready'] = self.module.params.get('is_ready')
+            args['isofilter'] = self.module.params.get('iso_filter')
+            args['domainid'] = self.get_domain('id')
+            args['account'] = self.get_account('name')
+            args['projectid'] = self.get_project('id')
+            args['zoneid'] = self.get_zone('id')
+
+            # if checksum is set, we only look on that.
+ checksum = self.module.params.get('checksum') + if not checksum: + args['name'] = self.module.params.get('name') + + isos = self.cs.listIsos(**args) + if isos: + if not checksum: + self.iso = isos['iso'][0] + else: + for i in isos['iso']: + if i['checksum'] == checksum: + self.iso = i + break + return self.iso + + + def remove_iso(self): + iso = self.get_iso() + if iso: + self.result['changed'] = True + + args = {} + args['id'] = iso['id'] + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') + + if not self.module.check_mode: + res = self.cs.deleteIso(**args) + return iso + + + def get_result(self, iso): + if iso: + if 'displaytext' in iso: + self.result['displaytext'] = iso['displaytext'] + if 'name' in iso: + self.result['name'] = iso['name'] + if 'zonename' in iso: + self.result['zone'] = iso['zonename'] + if 'checksum' in iso: + self.result['checksum'] = iso['checksum'] + if 'status' in iso: + self.result['status'] = iso['status'] + if 'isready' in iso: + self.result['is_ready'] = iso['isready'] + if 'created' in iso: + self.result['created'] = iso['created'] + if 'project' in iso: + self.result['project'] = iso['project'] + if 'domain' in iso: + self.result['domain'] = iso['domain'] + if 'account' in iso: + self.result['account'] = iso['account'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + url = dict(default=None), + os_type = dict(default=None), + zone = dict(default=None), + iso_filter = dict(default='self', choices=[ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + checksum = dict(default=None), + is_ready = dict(choices=BOOLEANS, default=False), + bootable = dict(choices=BOOLEANS, default=True), + is_featured = dict(choices=BOOLEANS, default=False), + is_dynamically_scalable = dict(choices=BOOLEANS, default=False), + 
state = dict(choices=['present', 'absent'], default='present'), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_iso = AnsibleCloudStackIso(module) + + state = module.params.get('state') + if state in ['absent']: + iso = acs_iso.remove_iso() + else: + iso = acs_iso.register_iso() + + result = acs_iso.get_result(iso) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_network.py b/cloud/cloudstack/cs_network.py new file mode 100644 index 00000000000..b602b345677 --- /dev/null +++ b/cloud/cloudstack/cs_network.py @@ -0,0 +1,637 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: cs_network +short_description: Manages networks on Apache CloudStack based clouds. +description: + - Create, update, restart and delete networks. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + name: + description: + - Name (case sensitive) of the network. + required: true + displaytext: + description: + - Displaytext of the network. + - If not specified, C(name) will be used as displaytext. + required: false + default: null + network_offering: + description: + - Name of the offering for the network. + - Required if C(state=present). + required: false + default: null + start_ip: + description: + - The beginning IPv4 address of the network belongs to. + - Only considered on create. + required: false + default: null + end_ip: + description: + - The ending IPv4 address of the network belongs to. + - If not specified, value of C(start_ip) is used. + - Only considered on create. + required: false + default: null + gateway: + description: + - The gateway of the network. + - Required for shared networks and isolated networks when it belongs to VPC. + - Only considered on create. + required: false + default: null + netmask: + description: + - The netmask of the network. + - Required for shared networks and isolated networks when it belongs to VPC. + - Only considered on create. + required: false + default: null + start_ipv6: + description: + - The beginning IPv6 address of the network belongs to. + - Only considered on create. + required: false + default: null + end_ipv6: + description: + - The ending IPv6 address of the network belongs to. + - If not specified, value of C(start_ipv6) is used. + - Only considered on create. + required: false + default: null + cidr_ipv6: + description: + - CIDR of IPv6 network, must be at least /64. + - Only considered on create. + required: false + default: null + gateway_ipv6: + description: + - The gateway of the IPv6 network. + - Required for shared networks. 
+ - Only considered on create. + required: false + default: null + vlan: + description: + - The ID or VID of the network. + required: false + default: null + vpc: + description: + - The ID or VID of the network. + required: false + default: null + isolated_pvlan: + description: + - The isolated private vlan for this network. + required: false + default: null + clean_up: + description: + - Cleanup old network elements. + - Only considered on C(state=restarted). + required: false + default: false + acl_type: + description: + - Access control type. + - Only considered on create. + required: false + default: account + choices: [ 'account', 'domain' ] + network_domain: + description: + - The network domain. + required: false + default: null + state: + description: + - State of the network. + required: false + default: present + choices: [ 'present', 'absent', 'restarted' ] + zone: + description: + - Name of the zone in which the network should be deployed. + - If not set, default zone is used. + required: false + default: null + project: + description: + - Name of the project the network to be deployed in. + required: false + default: null + domain: + description: + - Domain the network is related to. + required: false + default: null + account: + description: + - Account the network is related to. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. 
+    required: false
+    default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# create a network
+- local_action:
+    module: cs_network
+    name: my network
+    zone: gva-01
+    network_offering: DefaultIsolatedNetworkOfferingWithSourceNatService
+    network_domain: example.com
+
+# update a network
+- local_action:
+    module: cs_network
+    name: my network
+    displaytext: network of domain example.local
+    network_domain: example.local
+
+# restart a network with clean up
+- local_action:
+    module: cs_network
+    name: my network
+    clean_up: yes
+    state: restarted
+
+# remove a network
+- local_action:
+    module: cs_network
+    name: my network
+    state: absent
+'''
+
+RETURN = '''
+---
+id:
+  description: ID of the network.
+  returned: success
+  type: string
+  sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+  description: Name of the network.
+  returned: success
+  type: string
+  sample: web project
+displaytext:
+  description: Display text of the network.
+  returned: success
+  type: string
+  sample: web project
+dns1:
+  description: IP address of the 1st nameserver.
+  returned: success
+  type: string
+  sample: 1.2.3.4
+dns2:
+  description: IP address of the 2nd nameserver.
+  returned: success
+  type: string
+  sample: 1.2.3.4
+cidr:
+  description: IPv4 network CIDR.
+  returned: success
+  type: string
+  sample: 10.101.64.0/24
+gateway:
+  description: IPv4 gateway.
+  returned: success
+  type: string
+  sample: 10.101.64.1
+netmask:
+  description: IPv4 netmask.
+  returned: success
+  type: string
+  sample: 255.255.255.0
+cidr_ipv6:
+  description: IPv6 network CIDR.
+  returned: success
+  type: string
+  sample: 2001:db8::/64
+gateway_ipv6:
+  description: IPv6 gateway.
+  returned: success
+  type: string
+  sample: 2001:db8::1
+state:
+  description: State of the network.
+  returned: success
+  type: string
+  sample: Implemented
+zone:
+  description: Name of zone.
+ returned: success + type: string + sample: ch-gva-2 +domain: + description: Domain the network is related to. + returned: success + type: string + sample: ROOT +account: + description: Account the network is related to. + returned: success + type: string + sample: example account +project: + description: Name of project. + returned: success + type: string + sample: Production +tags: + description: List of resource tags associated with the network. + returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +acl_type: + description: Access type of the network (Domain, Account). + returned: success + type: string + sample: Account +broadcast_domaintype: + description: Broadcast domain type of the network. + returned: success + type: string + sample: Vlan +type: + description: Type of the network. + returned: success + type: string + sample: Isolated +traffic_type: + description: Traffic type of the network. + returned: success + type: string + sample: Guest +state: + description: State of the network (Allocated, Implemented, Setup). + returned: success + type: string + sample: Allocated +is_persistent: + description: Whether the network is persistent or not. + returned: success + type: boolean + sample: false +network_domain: + description: The network domain + returned: success + type: string + sample: example.local +network_offering: + description: The network offering name. 
+ returned: success + type: string + sample: DefaultIsolatedNetworkOfferingWithSourceNatService +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackNetwork(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.network = None + + + def get_or_fallback(self, key=None, fallback_key=None): + value = self.module.params.get(key) + if not value: + value = self.module.params.get(fallback_key) + return value + + + def get_vpc(self, key=None): + vpc = self.module.params.get('vpc') + if not vpc: + return None + + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['zoneid'] = self.get_zone(key='id') + + vpcs = self.cs.listVPCs(**args) + if vpcs: + for v in vpcs['vpc']: + if vpc in [ v['name'], v['displaytext'], v['id'] ]: + return self._get_by_key(key, v) + self.module.fail_json(msg="VPC '%s' not found" % vpc) + + + def get_network_offering(self, key=None): + network_offering = self.module.params.get('network_offering') + if not network_offering: + self.module.fail_json(msg="missing required arguments: network_offering") + + args = {} + args['zoneid'] = self.get_zone(key='id') + + network_offerings = self.cs.listNetworkOfferings(**args) + if network_offerings: + for no in network_offerings['networkoffering']: + if network_offering in [ no['name'], no['displaytext'], no['id'] ]: + return self._get_by_key(key, no) + self.module.fail_json(msg="Network offering '%s' not found" % network_offering) + + + def _get_args(self): + args = {} + args['name'] = self.module.params.get('name') + args['displaytext'] = self.get_or_fallback('displaytext','name') + args['networkdomain'] = self.module.params.get('network_domain') + 
args['networkofferingid'] = self.get_network_offering(key='id') + return args + + + def get_network(self): + if not self.network: + network = self.module.params.get('name') + + args = {} + args['zoneid'] = self.get_zone(key='id') + args['projectid'] = self.get_project(key='id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + + networks = self.cs.listNetworks(**args) + if networks: + for n in networks['network']: + if network in [ n['name'], n['displaytext'], n['id']]: + self.network = n + break + return self.network + + + def present_network(self): + network = self.get_network() + if not network: + network = self.create_network(network) + else: + network = self.update_network(network) + return network + + + def update_network(self, network): + args = self._get_args() + args['id'] = network['id'] + + if self._has_changed(args, network): + self.result['changed'] = True + if not self.module.check_mode: + network = self.cs.updateNetwork(**args) + + if 'errortext' in network: + self.module.fail_json(msg="Failed: '%s'" % network['errortext']) + + poll_async = self.module.params.get('poll_async') + if network and poll_async: + network = self._poll_job(network, 'network') + return network + + + def create_network(self, network): + self.result['changed'] = True + + args = self._get_args() + args['acltype'] = self.module.params.get('acl_type') + args['zoneid'] = self.get_zone(key='id') + args['projectid'] = self.get_project(key='id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['startip'] = self.module.params.get('start_ip') + args['endip'] = self.get_or_fallback('end_ip', 'start_ip') + args['netmask'] = self.module.params.get('netmask') + args['gateway'] = self.module.params.get('gateway') + args['startipv6'] = self.module.params.get('start_ipv6') + args['endipv6'] = self.get_or_fallback('end_ipv6', 'start_ipv6') + args['ip6cidr'] = self.module.params.get('cidr_ipv6') 
+        args['ip6gateway'] = self.module.params.get('gateway_ipv6')
+        args['vlan'] = self.module.params.get('vlan')
+        args['isolatedpvlan'] = self.module.params.get('isolated_pvlan')
+        args['subdomainaccess'] = self.module.params.get('subdomain_access')
+        args['vpcid'] = self.get_vpc(key='id')
+
+        if not self.module.check_mode:
+            res = self.cs.createNetwork(**args)
+
+            if 'errortext' in res:
+                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+            network = res['network']
+        return network
+
+
+    def restart_network(self):
+        network = self.get_network()
+
+        if not network:
+            self.module.fail_json(msg="No network named '%s' found." % self.module.params.get('name'))
+
+        # Restarting only available for these states
+        if network['state'].lower() in [ 'implemented', 'setup' ]:
+            self.result['changed'] = True
+
+            args = {}
+            args['id'] = network['id']
+            args['cleanup'] = self.module.params.get('clean_up')
+
+            if not self.module.check_mode:
+                network = self.cs.restartNetwork(**args)
+
+                if 'errortext' in network:
+                    self.module.fail_json(msg="Failed: '%s'" % network['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if network and poll_async:
+                    network = self._poll_job(network, 'network')
+        return network
+
+
+    def absent_network(self):
+        network = self.get_network()
+        if network:
+            self.result['changed'] = True
+
+            args = {}
+            args['id'] = network['id']
+
+            if not self.module.check_mode:
+                res = self.cs.deleteNetwork(**args)
+
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if res and poll_async:
+                    res = self._poll_job(res, 'network')
+        return network
+
+
+    def get_result(self, network):
+        if network:
+            if 'id' in network:
+                self.result['id'] = network['id']
+            if 'name' in network:
+                self.result['name'] = network['name']
+            if 'displaytext' in network:
+                self.result['displaytext'] = network['displaytext']
+            if 'dns1' in network:
+                self.result['dns1'] =
network['dns1'] + if 'dns2' in network: + self.result['dns2'] = network['dns2'] + if 'cidr' in network: + self.result['cidr'] = network['cidr'] + if 'broadcastdomaintype' in network: + self.result['broadcast_domaintype'] = network['broadcastdomaintype'] + if 'netmask' in network: + self.result['netmask'] = network['netmask'] + if 'gateway' in network: + self.result['gateway'] = network['gateway'] + if 'ip6cidr' in network: + self.result['cidr_ipv6'] = network['ip6cidr'] + if 'ip6gateway' in network: + self.result['gateway_ipv6'] = network['ip6gateway'] + if 'state' in network: + self.result['state'] = network['state'] + if 'type' in network: + self.result['type'] = network['type'] + if 'traffictype' in network: + self.result['traffic_type'] = network['traffictype'] + if 'zone' in network: + self.result['zone'] = network['zonename'] + if 'domain' in network: + self.result['domain'] = network['domain'] + if 'account' in network: + self.result['account'] = network['account'] + if 'project' in network: + self.result['project'] = network['project'] + if 'acltype' in network: + self.result['acl_type'] = network['acltype'] + if 'networkdomain' in network: + self.result['network_domain'] = network['networkdomain'] + if 'networkofferingname' in network: + self.result['network_offering'] = network['networkofferingname'] + if 'ispersistent' in network: + self.result['is_persistent'] = network['ispersistent'] + if 'tags' in network: + self.result['tags'] = [] + for tag in network['tags']: + result_tag = {} + result_tag['key'] = tag['key'] + result_tag['value'] = tag['value'] + self.result['tags'].append(result_tag) + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + displaytext = dict(default=None), + network_offering = dict(default=None), + zone = dict(default=None), + start_ip = dict(default=None), + end_ip = dict(default=None), + gateway = dict(default=None), + netmask = dict(default=None), + start_ipv6 = 
dict(default=None), + end_ipv6 = dict(default=None), + cidr_ipv6 = dict(default=None), + gateway_ipv6 = dict(default=None), + vlan = dict(default=None), + vpc = dict(default=None), + isolated_pvlan = dict(default=None), + clean_up = dict(type='bool', choices=BOOLEANS, default=False), + network_domain = dict(default=None), + state = dict(choices=['present', 'absent', 'restarted' ], default='present'), + acl_type = dict(choices=['account', 'domain'], default='account'), + project = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + poll_async = dict(type='bool', choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ['start_ip', 'netmask', 'gateway'], + ['start_ipv6', 'cidr_ipv6', 'gateway_ipv6'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_network = AnsibleCloudStackNetwork(module) + + state = module.params.get('state') + if state in ['absent']: + network = acs_network.absent_network() + + elif state in ['restarted']: + network = acs_network.restart_network() + + else: + network = acs_network.present_network() + + result = acs_network.get_result(network) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py new file mode 100644 index 00000000000..3b88ca85723 --- /dev/null +++ b/cloud/cloudstack/cs_portforward.py @@ -0,0 +1,437 @@ +#!/usr/bin/python +# -*- coding: 
utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_portforward +short_description: Manages port forwarding rules on Apache CloudStack based clouds. +description: + - Create, update and remove port forwarding rules. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + ip_address: + description: + - Public IP address the rule is assigned to. + required: true + vm: + description: + - Name of virtual machine which we make the port forwarding rule for. + - Required if C(state=present). + required: false + default: null + state: + description: + - State of the port forwarding rule. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + protocol: + description: + - Protocol of the port forwarding rule. + required: false + default: 'tcp' + choices: [ 'tcp', 'udp' ] + public_port: + description: + - Start public port for this rule. + required: true + public_end_port: + description: + - End public port for this rule. + - If not specified equal C(public_port). + required: false + default: null + private_port: + description: + - Start private port for this rule. + required: true + private_end_port: + description: + - End private port for this rule. + - If not specified equal C(private_port). 
+ required: false + default: null + open_firewall: + description: + - Whether the firewall rule for public port should be created, while creating the new rule. + - Use M(cs_firewall) for managing firewall rules. + required: false + default: false + vm_guest_ip: + description: + - VM guest NIC secondary IP address for the port forwarding rule. + required: false + default: false + domain: + description: + - Domain the C(vm) is related to. + required: false + default: null + account: + description: + - Account the C(vm) is related to. + required: false + default: null + project: + description: + - Name of the project the C(vm) is located in. + required: false + default: null + zone: + description: + - Name of the zone in which the virtual machine is in. + - If not set, default zone is used. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# 1.2.3.4:80 -> web01:8080 +- local_action: + module: cs_portforward + ip_address: 1.2.3.4 + vm: web01 + public_port: 80 + private_port: 8080 + +# forward SSH and open firewall +- local_action: + module: cs_portforward + ip_address: '{{ public_ip }}' + vm: '{{ inventory_hostname }}' + public_port: '{{ ansible_ssh_port }}' + private_port: 22 + open_firewall: true + +# forward DNS traffic, but do not open firewall +- local_action: + module: cs_portforward + ip_address: 1.2.3.4 + vm: '{{ inventory_hostname }}' + public_port: 53 + private_port: 53 + protocol: udp + open_firewall: true + +# remove ssh port forwarding +- local_action: + module: cs_portforward + ip_address: 1.2.3.4 + public_port: 22 + private_port: 22 + state: absent +''' + +RETURN = ''' +--- +ip_address: + description: Public IP address. + returned: success + type: string + sample: 1.2.3.4 +protocol: + description: Protocol. 
+ returned: success + type: string + sample: tcp +private_port: + description: Start port on the virtual machine's IP address. + returned: success + type: int + sample: 80 +private_end_port: + description: End port on the virtual machine's IP address. + returned: success + type: int +public_port: + description: Start port on the public IP address. + returned: success + type: int + sample: 80 +public_end_port: + description: End port on the public IP address. + returned: success + type: int + sample: 80 +tags: + description: Tags related to the port forwarding. + returned: success + type: list + sample: [] +vm_name: + description: Name of the virtual machine. + returned: success + type: string + sample: web-01 +vm_display_name: + description: Display name of the virtual machine. + returned: success + type: string + sample: web-01 +vm_guest_ip: + description: IP of the virtual machine. + returned: success + type: string + sample: 10.101.65.152 +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackPortforwarding(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.portforwarding_rule = None + self.vm_default_nic = None + + + def get_public_end_port(self): + if not self.module.params.get('public_end_port'): + return self.module.params.get('public_port') + return self.module.params.get('public_end_port') + + + def get_private_end_port(self): + if not self.module.params.get('private_end_port'): + return self.module.params.get('private_port') + return self.module.params.get('private_end_port') + + + def get_vm_guest_ip(self): + vm_guest_ip = self.module.params.get('vm_guest_ip') + default_nic = self.get_vm_default_nic() + + if not vm_guest_ip: + return default_nic['ipaddress'] + + for secondary_ip in default_nic['secondaryip']: + if 
 vm_guest_ip == secondary_ip['ipaddress']:
+                return vm_guest_ip
+        self.module.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip)
+
+
+    def get_vm_default_nic(self):
+        if self.vm_default_nic:
+            return self.vm_default_nic
+
+        nics = self.cs.listNics(virtualmachineid=self.get_vm(key='id'))
+        if nics:
+            for n in nics['nic']:
+                if n['isdefault']:
+                    self.vm_default_nic = n
+                    return self.vm_default_nic
+        self.module.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm'))
+
+
+    def get_portforwarding_rule(self):
+        if not self.portforwarding_rule:
+            protocol = self.module.params.get('protocol')
+            public_port = self.module.params.get('public_port')
+            public_end_port = self.get_public_end_port()
+            private_port = self.module.params.get('private_port')
+            private_end_port = self.get_private_end_port()
+
+            args = {}
+            args['ipaddressid'] = self.get_ip_address(key='id')
+            args['projectid'] = self.get_project(key='id')
+            portforwarding_rules = self.cs.listPortForwardingRules(**args)
+
+            if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
+                for rule in portforwarding_rules['portforwardingrule']:
+                    if protocol == rule['protocol'] \
+                        and public_port == int(rule['publicport']):
+                        self.portforwarding_rule = rule
+                        break
+        return self.portforwarding_rule
+
+
+    def present_portforwarding_rule(self):
+        portforwarding_rule = self.get_portforwarding_rule()
+        if portforwarding_rule:
+            portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
+        else:
+            portforwarding_rule = self.create_portforwarding_rule()
+        return portforwarding_rule
+
+
+    def create_portforwarding_rule(self):
+        args = {}
+        args['protocol'] = self.module.params.get('protocol')
+        args['publicport'] = self.module.params.get('public_port')
+        args['publicendport'] = self.get_public_end_port()
+        args['privateport'] = self.module.params.get('private_port')
+        args['privateendport'] = self.get_private_end_port()
+        args['openfirewall'] =
self.module.params.get('open_firewall') + args['vmguestip'] = self.get_vm_guest_ip() + args['ipaddressid'] = self.get_ip_address(key='id') + args['virtualmachineid'] = self.get_vm(key='id') + + portforwarding_rule = None + self.result['changed'] = True + if not self.module.check_mode: + portforwarding_rule = self.cs.createPortForwardingRule(**args) + poll_async = self.module.params.get('poll_async') + if poll_async: + portforwarding_rule = self._poll_job(portforwarding_rule, 'portforwardingrule') + return portforwarding_rule + + + def update_portforwarding_rule(self, portforwarding_rule): + args = {} + args['protocol'] = self.module.params.get('protocol') + args['publicport'] = self.module.params.get('public_port') + args['publicendport'] = self.get_public_end_port() + args['privateport'] = self.module.params.get('private_port') + args['privateendport'] = self.get_private_end_port() + args['openfirewall'] = self.module.params.get('open_firewall') + args['vmguestip'] = self.get_vm_guest_ip() + args['ipaddressid'] = self.get_ip_address(key='id') + args['virtualmachineid'] = self.get_vm(key='id') + + if self._has_changed(args, portforwarding_rule): + self.result['changed'] = True + if not self.module.check_mode: + # API broken in 4.2.1?, workaround using remove/create instead of update + # portforwarding_rule = self.cs.updatePortForwardingRule(**args) + self.absent_portforwarding_rule() + portforwarding_rule = self.cs.createPortForwardingRule(**args) + poll_async = self.module.params.get('poll_async') + if poll_async: + portforwarding_rule = self._poll_job(portforwarding_rule, 'portforwardingrule') + return portforwarding_rule + + + def absent_portforwarding_rule(self): + portforwarding_rule = self.get_portforwarding_rule() + + if portforwarding_rule: + self.result['changed'] = True + args = {} + args['id'] = portforwarding_rule['id'] + + if not self.module.check_mode: + res = self.cs.deletePortForwardingRule(**args) + poll_async = self.module.params.get('poll_async') 
+ if poll_async: + self._poll_job(res, 'portforwardingrule') + return portforwarding_rule + + + def get_result(self, portforwarding_rule): + if portforwarding_rule: + if 'id' in portforwarding_rule: + self.result['id'] = portforwarding_rule['id'] + if 'virtualmachinedisplayname' in portforwarding_rule: + self.result['vm_display_name'] = portforwarding_rule['virtualmachinedisplayname'] + if 'virtualmachinename' in portforwarding_rule: + self.result['vm_name'] = portforwarding_rule['virtualmachinename'] + if 'ipaddress' in portforwarding_rule: + self.result['ip_address'] = portforwarding_rule['ipaddress'] + if 'vmguestip' in portforwarding_rule: + self.result['vm_guest_ip'] = portforwarding_rule['vmguestip'] + if 'publicport' in portforwarding_rule: + self.result['public_port'] = int(portforwarding_rule['publicport']) + if 'publicendport' in portforwarding_rule: + self.result['public_end_port'] = int(portforwarding_rule['publicendport']) + if 'privateport' in portforwarding_rule: + self.result['private_port'] = int(portforwarding_rule['privateport']) + if 'privateendport' in portforwarding_rule: + self.result['private_end_port'] = int(portforwarding_rule['privateendport']) + if 'protocol' in portforwarding_rule: + self.result['protocol'] = portforwarding_rule['protocol'] + if 'tags' in portforwarding_rule: + self.result['tags'] = [] + for tag in portforwarding_rule['tags']: + result_tag = {} + result_tag['key'] = tag['key'] + result_tag['value'] = tag['value'] + self.result['tags'].append(result_tag) + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + ip_address = dict(required=True), + protocol= dict(choices=['tcp', 'udp'], default='tcp'), + public_port = dict(type='int', required=True), + public_end_port = dict(type='int', default=None), + private_port = dict(type='int', required=True), + private_end_port = dict(type='int', default=None), + state = dict(choices=['present', 'absent'], default='present'), + open_firewall = 
dict(choices=BOOLEANS, default=False), + vm_guest_ip = dict(default=None), + vm = dict(default=None), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_pf = AnsibleCloudStackPortforwarding(module) + state = module.params.get('state') + if state in ['absent']: + pf_rule = acs_pf.absent_portforwarding_rule() + else: + pf_rule = acs_pf.present_portforwarding_rule() + + result = acs_pf.get_result(pf_rule) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py new file mode 100644 index 00000000000..0f391bc5005 --- /dev/null +++ b/cloud/cloudstack/cs_project.py @@ -0,0 +1,342 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_project +short_description: Manages projects on Apache CloudStack based clouds. +description: + - Create, update, suspend, activate and remove projects. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the project. + required: true + displaytext: + description: + - Displaytext of the project. + - If not specified, C(name) will be used as displaytext. + required: false + default: null + state: + description: + - State of the project. + required: false + default: 'present' + choices: [ 'present', 'absent', 'active', 'suspended' ] + domain: + description: + - Domain the project is related to. + required: false + default: null + account: + description: + - Account the project is related to. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Create a project +- local_action: + module: cs_project + name: web + +# Rename a project +- local_action: + module: cs_project + name: web + displaytext: my web project + +# Suspend an existing project +- local_action: + module: cs_project + name: web + state: suspended + +# Activate an existing project +- local_action: + module: cs_project + name: web + state: active + +# Remove a project +- local_action: + module: cs_project + name: web + state: absent +''' + +RETURN = ''' +--- +id: + description: ID of the project. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the project. + returned: success + type: string + sample: web project +displaytext: + description: Display text of the project. 
+ returned: success + type: string + sample: web project +state: + description: State of the project. + returned: success + type: string + sample: Active +domain: + description: Domain the project is related to. + returned: success + type: string + sample: example domain +account: + description: Account the project is related to. + returned: success + type: string + sample: example account +tags: + description: List of resource tags associated with the project. + returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackProject(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.project = None + + + def get_displaytext(self): + displaytext = self.module.params.get('displaytext') + if not displaytext: + displaytext = self.module.params.get('name') + return displaytext + + + def get_project(self): + if not self.project: + project = self.module.params.get('name') + + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + + projects = self.cs.listProjects(**args) + if projects: + for p in projects['project']: + if project.lower() in [ p['name'].lower(), p['id']]: + self.project = p + break + return self.project + + + def present_project(self): + project = self.get_project() + if not project: + project = self.create_project(project) + else: + project = self.update_project(project) + return project + + + def update_project(self, project): + args = {} + args['id'] = project['id'] + args['displaytext'] = self.get_displaytext() + + if self._has_changed(args, project): + self.result['changed'] = True + if not self.module.check_mode: + project = self.cs.updateProject(**args) + + if 'errortext' in project: + 
self.module.fail_json(msg="Failed: '%s'" % project['errortext']) + + poll_async = self.module.params.get('poll_async') + if project and poll_async: + project = self._poll_job(project, 'project') + return project + + + def create_project(self, project): + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['displaytext'] = self.get_displaytext() + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + + if not self.module.check_mode: + project = self.cs.createProject(**args) + + if 'errortext' in project: + self.module.fail_json(msg="Failed: '%s'" % project['errortext']) + + poll_async = self.module.params.get('poll_async') + if project and poll_async: + project = self._poll_job(project, 'project') + return project + + + def state_project(self, state=None): + project = self.get_project() + + if not project: + self.module.fail_json(msg="No project named '%s' found." % self.module.params.get('name')) + + if project['state'].lower() != state: + self.result['changed'] = True + + args = {} + args['id'] = project['id'] + + if not self.module.check_mode: + if state == 'suspended': + project = self.cs.suspendProject(**args) + else: + project = self.cs.activateProject(**args) + + if 'errortext' in project: + self.module.fail_json(msg="Failed: '%s'" % project['errortext']) + + poll_async = self.module.params.get('poll_async') + if project and poll_async: + project = self._poll_job(project, 'project') + return project + + + def absent_project(self): + project = self.get_project() + if project: + self.result['changed'] = True + + args = {} + args['id'] = project['id'] + + if not self.module.check_mode: + res = self.cs.deleteProject(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'project') + return project + + + def get_result(self, project): + if 
project: + if 'name' in project: + self.result['name'] = project['name'] + if 'displaytext' in project: + self.result['displaytext'] = project['displaytext'] + if 'account' in project: + self.result['account'] = project['account'] + if 'domain' in project: + self.result['domain'] = project['domain'] + if 'state' in project: + self.result['state'] = project['state'] + if 'tags' in project: + self.result['tags'] = [] + for tag in project['tags']: + result_tag = {} + result_tag['key'] = tag['key'] + result_tag['value'] = tag['value'] + self.result['tags'].append(result_tag) + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + displaytext = dict(default=None), + state = dict(choices=['present', 'absent', 'active', 'suspended' ], default='present'), + domain = dict(default=None), + account = dict(default=None), + poll_async = dict(type='bool', choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_project = AnsibleCloudStackProject(module) + + state = module.params.get('state') + if state in ['absent']: + project = acs_project.absent_project() + + elif state in ['active', 'suspended']: + project = acs_project.state_project(state=state) + + else: + project = acs_project.present_project() + + result = acs_project.get_result(project) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * 
+main() diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py new file mode 100644 index 00000000000..54a71686a6e --- /dev/null +++ b/cloud/cloudstack/cs_securitygroup.py @@ -0,0 +1,200 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_securitygroup +short_description: Manages security groups on Apache CloudStack based clouds. +description: + - Create and remove security groups. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the security group. + required: true + description: + description: + - Description of the security group. + required: false + default: null + state: + description: + - State of the security group. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + project: + description: + - Name of the project the security group to be created in. + required: false + default: null +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Create a security group +- local_action: + module: cs_securitygroup + name: default + description: default security group + +# Remove a security group +- local_action: + module: cs_securitygroup + name: default + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of security group. 
+ returned: success + type: string + sample: app +description: + description: Description of security group. + returned: success + type: string + sample: application security group +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackSecurityGroup(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.security_group = None + + + def get_security_group(self): + if not self.security_group: + sg_name = self.module.params.get('name') + args = {} + args['projectid'] = self.get_project('id') + sgs = self.cs.listSecurityGroups(**args) + if sgs: + for s in sgs['securitygroup']: + if s['name'] == sg_name: + self.security_group = s + break + return self.security_group + + + def create_security_group(self): + security_group = self.get_security_group() + if not security_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['projectid'] = self.get_project('id') + args['description'] = self.module.params.get('description') + + if not self.module.check_mode: + res = self.cs.createSecurityGroup(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + security_group = res['securitygroup'] + + return security_group + + + def remove_security_group(self): + security_group = self.get_security_group() + if security_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['projectid'] = self.get_project('id') + + if not self.module.check_mode: + res = self.cs.deleteSecurityGroup(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + return security_group + + + def get_result(self, security_group): + if security_group: + if 'name' in security_group: + 
self.result['name'] = security_group['name'] + if 'description' in security_group: + self.result['description'] = security_group['description'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + description = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + project = dict(default=None), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_sg = AnsibleCloudStackSecurityGroup(module) + + state = module.params.get('state') + if state in ['absent']: + sg = acs_sg.remove_security_group() + else: + sg = acs_sg.create_security_group() + + result = acs_sg.get_result(sg) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py new file mode 100644 index 00000000000..e943e7d11c2 --- /dev/null +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -0,0 +1,439 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_securitygroup_rule +short_description: Manages security group rules on Apache CloudStack based clouds. +description: + - Add and remove security group rules. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + security_group: + description: + - Name of the security group the rule is related to. The security group must be existing. + required: true + state: + description: + - State of the security group rule. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + protocol: + description: + - Protocol of the security group rule. + required: false + default: 'tcp' + choices: [ 'tcp', 'udp', 'icmp', 'ah', 'esp', 'gre' ] + type: + description: + - Ingress or egress security group rule. + required: false + default: 'ingress' + choices: [ 'ingress', 'egress' ] + cidr: + description: + - CIDR (full notation) to be used for security group rule. + required: false + default: '0.0.0.0/0' + user_security_group: + description: + - Security group this rule is based of. + required: false + default: null + start_port: + description: + - Start port for this rule. Required if C(protocol=tcp) or C(protocol=udp). + required: false + default: null + aliases: [ 'port' ] + end_port: + description: + - End port for this rule. Required if C(protocol=tcp) or C(protocol=udp), but C(start_port) will be used if not set. + required: false + default: null + icmp_type: + description: + - Type of the icmp message being sent. Required if C(protocol=icmp). + required: false + default: null + icmp_code: + description: + - Error code for this icmp message. 
Required if C(protocol=icmp). + required: false + default: null + project: + description: + - Name of the project the security group to be created in. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +--- +# Allow inbound port 80/tcp from 1.2.3.4 added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + port: 80 + cidr: 1.2.3.4/32 + +# Allow tcp/udp outbound added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + type: egress + start_port: 1 + end_port: 65535 + protocol: '{{ item }}' + with_items: + - tcp + - udp + +# Allow inbound icmp from 0.0.0.0/0 added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + protocol: icmp + icmp_code: -1 + icmp_type: -1 + +# Remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + port: 80 + state: absent + +# Allow inbound port 80/tcp from security group web added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + port: 80 + user_security_group: web +''' + +RETURN = ''' +--- +security_group: + description: security group of the rule. + returned: success + type: string + sample: default +type: + description: type of the rule. + returned: success + type: string + sample: ingress +cidr: + description: CIDR of the rule. + returned: success and cidr is defined + type: string + sample: 0.0.0.0/0 +user_security_group: + description: user security group of the rule. + returned: success and user_security_group is defined + type: string + sample: default +protocol: + description: protocol of the rule. 
+ returned: success + type: string + sample: tcp +start_port: + description: start port of the rule. + returned: success + type: int + sample: 80 +end_port: + description: end port of the rule. + returned: success + type: int + sample: 80 +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + + + def _tcp_udp_match(self, rule, protocol, start_port, end_port): + return protocol in ['tcp', 'udp'] \ + and protocol == rule['protocol'] \ + and start_port == int(rule['startport']) \ + and end_port == int(rule['endport']) + + + def _icmp_match(self, rule, protocol, icmp_code, icmp_type): + return protocol == 'icmp' \ + and protocol == rule['protocol'] \ + and icmp_code == int(rule['icmpcode']) \ + and icmp_type == int(rule['icmptype']) + + + def _ah_esp_gre_match(self, rule, protocol): + return protocol in ['ah', 'esp', 'gre'] \ + and protocol == rule['protocol'] + + + def _type_security_group_match(self, rule, security_group_name): + return security_group_name \ + and 'securitygroupname' in rule \ + and security_group_name == rule['securitygroupname'] + + + def _type_cidr_match(self, rule, cidr): + return 'cidr' in rule \ + and cidr == rule['cidr'] + + + def get_end_port(self): + if self.module.params.get('end_port'): + return self.module.params.get('end_port') + return self.module.params.get('start_port') + + + def _get_rule(self, rules): + user_security_group_name = self.module.params.get('user_security_group') + cidr = self.module.params.get('cidr') + protocol = self.module.params.get('protocol') + start_port = self.module.params.get('start_port') + end_port = self.get_end_port() + icmp_code = self.module.params.get('icmp_code') + icmp_type = 
self.module.params.get('icmp_type') + + if protocol in ['tcp', 'udp'] and not (start_port and end_port): + self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol) + + if protocol == 'icmp' and (icmp_type is None or icmp_code is None): + self.module.fail_json(msg="no icmp_type or icmp_code set for protocol '%s'" % protocol) + + for rule in rules: + if user_security_group_name: + type_match = self._type_security_group_match(rule, user_security_group_name) + else: + type_match = self._type_cidr_match(rule, cidr) + + protocol_match = ( self._tcp_udp_match(rule, protocol, start_port, end_port) \ + or self._icmp_match(rule, protocol, icmp_code, icmp_type) \ + or self._ah_esp_gre_match(rule, protocol) + ) + + if type_match and protocol_match: + return rule + return None + + + def get_security_group(self, security_group_name=None): + if not security_group_name: + security_group_name = self.module.params.get('security_group') + args = {} + args['securitygroupname'] = security_group_name + args['projectid'] = self.get_project('id') + sgs = self.cs.listSecurityGroups(**args) + if not sgs or 'securitygroup' not in sgs: + self.module.fail_json(msg="security group '%s' not found" % security_group_name) + return sgs['securitygroup'][0] + + + def add_rule(self): + security_group = self.get_security_group() + + args = {} + user_security_group_name = self.module.params.get('user_security_group') + + # the user_security_group and cidr are mutually_exclusive, but cidr is defaulted to 0.0.0.0/0. + # that is why we ignore if we have a user_security_group. 
+ if user_security_group_name: + args['usersecuritygrouplist'] = [] + user_security_group = self.get_security_group(user_security_group_name) + args['usersecuritygrouplist'].append({ + 'group': user_security_group['name'], + 'account': user_security_group['account'], + }) + else: + args['cidrlist'] = self.module.params.get('cidr') + + args['protocol'] = self.module.params.get('protocol') + args['startport'] = self.module.params.get('start_port') + args['endport'] = self.get_end_port() + args['icmptype'] = self.module.params.get('icmp_type') + args['icmpcode'] = self.module.params.get('icmp_code') + args['projectid'] = self.get_project('id') + args['securitygroupid'] = security_group['id'] + + rule = None + res = None + sg_type = self.module.params.get('type') + if sg_type == 'ingress': + rule = self._get_rule(security_group['ingressrule']) + if not rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.authorizeSecurityGroupIngress(**args) + + elif sg_type == 'egress': + rule = self._get_rule(security_group['egressrule']) + if not rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.authorizeSecurityGroupEgress(**args) + + if res and 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + security_group = self._poll_job(res, 'securitygroup') + key = sg_type + "rule" # ingressrule / egressrule + if key in security_group: + rule = security_group[key][0] + return rule + + + def remove_rule(self): + security_group = self.get_security_group() + rule = None + res = None + sg_type = self.module.params.get('type') + if sg_type == 'ingress': + rule = self._get_rule(security_group['ingressrule']) + if rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.revokeSecurityGroupIngress(id=rule['ruleid']) + + elif sg_type == 'egress': + rule = 
self._get_rule(security_group['egressrule']) + if rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.revokeSecurityGroupEgress(id=rule['ruleid']) + + if res and 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'securitygroup') + return rule + + + def get_result(self, security_group_rule): + + self.result['type'] = self.module.params.get('type') + self.result['security_group'] = self.module.params.get('security_group') + + if security_group_rule: + rule = security_group_rule + if 'securitygroupname' in rule: + self.result['user_security_group'] = rule['securitygroupname'] + if 'cidr' in rule: + self.result['cidr'] = rule['cidr'] + if 'protocol' in rule: + self.result['protocol'] = rule['protocol'] + if 'startport' in rule: + self.result['start_port'] = rule['startport'] + if 'endport' in rule: + self.result['end_port'] = rule['endport'] + if 'icmpcode' in rule: + self.result['icmp_code'] = rule['icmpcode'] + if 'icmptype' in rule: + self.result['icmp_type'] = rule['icmptype'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + security_group = dict(required=True), + type = dict(choices=['ingress', 'egress'], default='ingress'), + cidr = dict(default='0.0.0.0/0'), + user_security_group = dict(default=None), + protocol = dict(choices=['tcp', 'udp', 'icmp', 'ah', 'esp', 'gre'], default='tcp'), + icmp_type = dict(type='int', default=None), + icmp_code = dict(type='int', default=None), + start_port = dict(type='int', default=None, aliases=['port']), + end_port = dict(type='int', default=None), + state = dict(choices=['present', 'absent'], default='present'), + project = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + 
api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['icmp_type', 'icmp_code'], + ['api_key', 'api_secret', 'api_url'], + ), + mutually_exclusive = ( + ['icmp_type', 'start_port'], + ['icmp_type', 'end_port'], + ['icmp_code', 'start_port'], + ['icmp_code', 'end_port'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_sg_rule = AnsibleCloudStackSecurityGroupRule(module) + + state = module.params.get('state') + if state in ['absent']: + sg_rule = acs_sg_rule.remove_rule() + else: + sg_rule = acs_sg_rule.add_rule() + + result = acs_sg_rule.get_result(sg_rule) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py new file mode 100644 index 00000000000..180e96ca6ae --- /dev/null +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -0,0 +1,259 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: cs_sshkeypair +short_description: Manages SSH keys on Apache CloudStack based clouds. +description: + - Create, register and remove SSH keys. + - If no key was found and no public key was provided and a new SSH + private/public key pair will be created and the private key will be returned. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + name: + description: + - Name of public key. + required: true + domain: + description: + - Domain the public key is related to. + required: false + default: null + account: + description: + - Account the public key is related to. + required: false + default: null + project: + description: + - Name of the project the public key to be registered in. + required: false + default: null + state: + description: + - State of the public key. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + public_key: + description: + - String of the public key. + required: false + default: null +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# create a new private / public key pair: +- local_action: cs_sshkeypair name=linus@example.com + register: key +- debug: msg='private key is {{ key.private_key }}' + +# remove a public key by its name: +- local_action: cs_sshkeypair name=linus@example.com state=absent + +# register your existing local public key: +- local_action: cs_sshkeypair name=linus@example.com public_key='{{ lookup('file', '~/.ssh/id_rsa.pub') }}' +''' + +RETURN = ''' +--- +name: + description: Name of the SSH public key. + returned: success + type: string + sample: linus@example.com +fingerprint: + description: Fingerprint of the SSH public key. + returned: success + type: string + sample: "86:5e:a3:e8:bd:95:7b:07:7c:c2:5c:f7:ad:8b:09:28" +private_key: + description: Private key of generated SSH keypair. 
+ returned: changed + type: string + sample: "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQCkeFYjI+4k8bWfIRMzp4pCzhlopNydbbwRu824P5ilD4ATWMUG\nvEtuCQ2Mp5k5Bma30CdYHgh2/SbxC5RxXSUKTUJtTKpoJUy8PAhb1nn9dnfkC2oU\naRVi9NRUgypTIZxMpgooHOxvAzWxbZCyh1W+91Ld3FNaGxTLqTgeevY84wIDAQAB\nAoGAcwQwgLyUwsNB1vmjWwE0QEmvHS4FlhZyahhi4hGfZvbzAxSWHIK7YUT1c8KU\n9XsThEIN8aJ3GvcoL3OAqNKRnoNb14neejVHkYRadhxqc0GVN6AUIyCqoEMpvhFI\nQrinM572ORzv5ffRjCTbvZcYlW+sqFKNo5e8pYIB8TigpFECQQDu7bg9vkvg8xPs\nkP1K+EH0vsR6vUfy+m3euXjnbJtiP7RoTkZk0JQMOmexgy1qQhISWT0e451wd62v\nJ7M0trl5AkEAsDivJnMIlCCCypwPN4tdNUYpe9dtidR1zLmb3SA7wXk5xMUgLZI9\ncWPjBCMt0KKShdDhQ+hjXAyKQLF7iAPuOwJABjdHCMwvmy2XwhrPjCjDRoPEBtFv\n0sFzJE08+QBZVogDwIbwy+SlRWArnHGmN9J6N+H8dhZD3U4vxZPJ1MBAOQJBAJxO\nCv1dt1Q76gbwmYa49LnWO+F+2cgRTVODpr5iYt5fOmBQQRRqzFkRMkFvOqn+KVzM\nQ6LKM6dn8BEl295vLhUCQQCVDWzoSk3GjL3sOjfAUTyAj8VAXM69llaptxWWySPM\nE9pA+8rYmHfohYFx7FD5/KWCO+sfmxTNB48X0uwyE8tO\n-----END RSA PRIVATE KEY-----\n" +''' + + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +try: + import sshpubkeys + has_lib_sshpubkeys = True +except ImportError: + has_lib_sshpubkeys = False + +from ansible.module_utils.cloudstack import * + +class AnsibleCloudStackSshKey(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.ssh_key = None + + + def register_ssh_key(self, public_key): + ssh_key = self.get_ssh_key() + args = {} + args['domainid'] = self.get_domain('id') + args['account'] = self.get_account('name') + args['projectid'] = self.get_project('id') + args['name'] = self.module.params.get('name') + + res = None + if not ssh_key: + self.result['changed'] = True + args['publickey'] = public_key + if not self.module.check_mode: + res = self.cs.registerSSHKeyPair(**args) + + else: + fingerprint = self._get_ssh_fingerprint(public_key) + if ssh_key['fingerprint'] != fingerprint: + self.result['changed'] = True + if not 
self.module.check_mode: + self.cs.deleteSSHKeyPair(**args) + args['publickey'] = public_key + res = self.cs.registerSSHKeyPair(**args) + + if res and 'keypair' in res: + ssh_key = res['keypair'] + + return ssh_key + + + def create_ssh_key(self): + ssh_key = self.get_ssh_key() + if not ssh_key: + self.result['changed'] = True + args = {} + args['domainid'] = self.get_domain('id') + args['account'] = self.get_account('name') + args['projectid'] = self.get_project('id') + args['name'] = self.module.params.get('name') + if not self.module.check_mode: + res = self.cs.createSSHKeyPair(**args) + ssh_key = res['keypair'] + return ssh_key + + + def remove_ssh_key(self): + ssh_key = self.get_ssh_key() + if ssh_key: + self.result['changed'] = True + args = {} + args['domainid'] = self.get_domain('id') + args['account'] = self.get_account('name') + args['projectid'] = self.get_project('id') + args['name'] = self.module.params.get('name') + if not self.module.check_mode: + res = self.cs.deleteSSHKeyPair(**args) + return ssh_key + + + def get_ssh_key(self): + if not self.ssh_key: + args = {} + args['domainid'] = self.get_domain('id') + args['account'] = self.get_account('name') + args['projectid'] = self.get_project('id') + args['name'] = self.module.params.get('name') + + ssh_keys = self.cs.listSSHKeyPairs(**args) + if ssh_keys and 'sshkeypair' in ssh_keys: + self.ssh_key = ssh_keys['sshkeypair'][0] + return self.ssh_key + + + def get_result(self, ssh_key): + if ssh_key: + if 'fingerprint' in ssh_key: + self.result['fingerprint'] = ssh_key['fingerprint'] + if 'name' in ssh_key: + self.result['name'] = ssh_key['name'] + if 'privatekey' in ssh_key: + self.result['private_key'] = ssh_key['privatekey'] + return self.result + + + def _get_ssh_fingerprint(self, public_key): + key = sshpubkeys.SSHKey(public_key) + return key.hash() + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + public_key = dict(default=None), + domain = 
dict(default=None), + account = dict(default=None), + project = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + if not has_lib_sshpubkeys: + module.fail_json(msg="python library sshpubkeys required: pip install sshpubkeys") + + try: + acs_sshkey = AnsibleCloudStackSshKey(module) + state = module.params.get('state') + if state in ['absent']: + ssh_key = acs_sshkey.remove_ssh_key() + else: + public_key = module.params.get('public_key') + if public_key: + ssh_key = acs_sshkey.register_ssh_key(public_key) + else: + ssh_key = acs_sshkey.create_ssh_key() + + result = acs_sshkey.get_result(ssh_key) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_template.py b/cloud/cloudstack/cs_template.py new file mode 100644 index 00000000000..1cd245d2b5c --- /dev/null +++ b/cloud/cloudstack/cs_template.py @@ -0,0 +1,633 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_template +short_description: Manages templates on Apache CloudStack based clouds. +description: + - Register a template from URL, create a template from a ROOT volume of a stopped VM or its snapshot and delete templates. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the template. + required: true + url: + description: + - URL of where the template is hosted. + - Mutually exclusive with C(vm). + required: false + default: null + vm: + description: + - VM name the template will be created from its volume or alternatively from a snapshot. + - VM must be in stopped state if created from its volume. + - Mutually exclusive with C(url). + required: false + default: null + snapshot: + description: + - Name of the snapshot, created from the VM ROOT volume, the template will be created from. + - C(vm) is required together with this argument. + required: false + default: null + os_type: + description: + - OS type that best represents the OS of this template. + required: false + default: null + checksum: + description: + - The MD5 checksum value of this template. + - If set, we search by checksum instead of name. + required: false + default: false + is_ready: + description: + - This flag is used for searching existing templates. + - If set to C(true), it will only list template ready for deployment e.g. successfully downloaded and installed. + - Recommended to set it to C(false). + required: false + default: false + is_public: + description: + - Register the template to be publicly available to all users. 
+ - Only used if C(state) is present. + required: false + default: false + is_featured: + description: + - Register the template to be featured. + - Only used if C(state) is present. + required: false + default: false + is_dynamically_scalable: + description: + - Register the template having XS/VMWare tools installed in order to support dynamic scaling of VM CPU/memory. + - Only used if C(state) is present. + required: false + default: false + project: + description: + - Name of the project the template to be registered in. + required: false + default: null + zone: + description: + - Name of the zone you wish the template to be registered or deleted from. + - If not specified, first found zone will be used. + required: false + default: null + template_filter: + description: + - Name of the filter used to search for the template. + required: false + default: 'self' + choices: [ 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community' ] + hypervisor: + description: + - Name the hypervisor to be used for creating the new template. + - Relevant when using C(state=present). + required: false + default: none + choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] + requires_hvm: + description: + - true if this template requires HVM. + required: false + default: false + password_enabled: + description: + - True if the template supports the password reset feature. + required: false + default: false + template_tag: + description: + - the tag for this template. + required: false + default: null + sshkey_enabled: + description: + - True if the template supports the sshkey upload feature. + required: false + default: false + is_routing: + description: + - True if the template type is routing i.e., if template is used to deploy router. + - Only considered if C(url) is used. + required: false + default: false + format: + description: + - The format for the template. + - Relevant when using C(state=present). 
+ required: false + default: null + choices: [ 'QCOW2', 'RAW', 'VHD', 'OVA' ] + is_extractable: + description: + - True if the template or its derivatives are extractable. + required: false + default: false + details: + description: + - Template details in key/value pairs. + required: false + default: null + bits: + description: + - 32 or 64 bits support. + required: false + default: '64' + displaytext: + description: + - the display text of the template. + required: true + default: null + state: + description: + - State of the template. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Register a systemvm template +- local_action: + module: cs_template + name: systemvm-4.5 + url: "http://packages.shapeblue.com/systemvmtemplate/4.5/systemvm64template-4.5-vmware.ova" + hypervisor: VMware + format: OVA + zone: tokio-ix + os_type: Debian GNU/Linux 7(64-bit) + is_routing: yes + +# Create a template from a stopped virtual machine's volume +- local_action: + module: cs_template + name: debian-base-template + vm: debian-base-vm + os_type: Debian GNU/Linux 7(64-bit) + zone: tokio-ix + password_enabled: yes + is_public: yes + +# Create a template from a virtual machine's root volume snapshot +- local_action: + module: cs_template + name: debian-base-template + vm: debian-base-vm + snapshot: ROOT-233_2015061509114 + os_type: Debian GNU/Linux 7(64-bit) + zone: tokio-ix + password_enabled: yes + is_public: yes + +# Remove a template +- local_action: + module: cs_template + name: systemvm-4.2 + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the template. + returned: success + type: string + sample: Debian 7 64-bit +displaytext: + description: Displaytext of the template. 
+ returned: success + type: string + sample: Debian 7.7 64-bit minimal 2015-03-19 +checksum: + description: MD5 checksum of the template. + returned: success + type: string + sample: 0b31bccccb048d20b551f70830bb7ad0 +status: + description: Status of the template. + returned: success + type: string + sample: Download Complete +is_ready: + description: True if the template is ready to be deployed from. + returned: success + type: boolean + sample: true +is_public: + description: True if the template is public. + returned: success + type: boolean + sample: true +is_featured: + description: True if the template is featured. + returned: success + type: boolean + sample: true +is_extractable: + description: True if the template is extractable. + returned: success + type: boolean + sample: true +format: + description: Format of the template. + returned: success + type: string + sample: OVA +os_type: + description: Type of the OS. + returned: success + type: string + sample: CentOS 6.5 (64-bit) +password_enabled: + description: True if the reset password feature is enabled, false otherwise. + returned: success + type: boolean + sample: false +sshkey_enabled: + description: true if template is sshkey enabled, false otherwise. + returned: success + type: boolean + sample: false +cross_zones: + description: true if the template is managed across all zones, false otherwise. + returned: success + type: boolean + sample: false +template_type: + description: Type of the template. + returned: success + type: string + sample: USER +created: + description: Date of registering. + returned: success + type: string + sample: 2015-03-29T14:57:06+0200 +template_tag: + description: Template tag related to this template. + returned: success + type: string + sample: special +hypervisor: + description: Hypervisor related to this template. + returned: success + type: string + sample: VMware +tags: + description: List of resource tags associated with the template.
+ returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +zone: + description: Name of zone the template is registered in. + returned: success + type: string + sample: zuerich +domain: + description: Domain the template is related to. + returned: success + type: string + sample: example domain +account: + description: Account the template is related to. + returned: success + type: string + sample: example account +project: + description: Name of project the template is related to. + returned: success + type: string + sample: Production +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackTemplate(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + + + def _get_args(self): + args = {} + args['name'] = self.module.params.get('name') + args['displaytext'] = self.module.params.get('displaytext') + args['bits'] = self.module.params.get('bits') + args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable') + args['isextractable'] = self.module.params.get('is_extractable') + args['isfeatured'] = self.module.params.get('is_featured') + args['ispublic'] = self.module.params.get('is_public') + args['passwordenabled'] = self.module.params.get('password_enabled') + args['requireshvm'] = self.module.params.get('requires_hvm') + args['templatetag'] = self.module.params.get('template_tag') + args['ostypeid'] = self.get_os_type(key='id') + + if not args['ostypeid']: + self.module.fail_json(msg="Missing required arguments: os_type") + + if not args['displaytext']: + args['displaytext'] = self.module.params.get('name') + return args + + + def get_root_volume(self, key=None): + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = 
self.get_project(key='id') + args['virtualmachineid'] = self.get_vm(key='id') + args['type'] = "ROOT" + + volumes = self.cs.listVolumes(**args) + if volumes: + return self._get_by_key(key, volumes['volume'][0]) + self.module.fail_json(msg="Root volume for '%s' not found" % self.get_vm('name')) + + + def get_snapshot(self, key=None): + snapshot = self.module.params.get('snapshot') + if not snapshot: + return None + + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['volumeid'] = self.get_root_volume('id') + snapshots = self.cs.listSnapshots(**args) + if snapshots: + for s in snapshots['snapshot']: + if snapshot in [ s['name'], s['id'] ]: + return self._get_by_key(key, s) + self.module.fail_json(msg="Snapshot '%s' not found" % snapshot) + + + def create_template(self): + template = self.get_template() + if not template: + self.result['changed'] = True + + args = self._get_args() + snapshot_id = self.get_snapshot(key='id') + if snapshot_id: + args['snapshotid'] = snapshot_id + else: + args['volumeid'] = self.get_root_volume('id') + + if not self.module.check_mode: + template = self.cs.createTemplate(**args) + + if 'errortext' in template: + self.module.fail_json(msg="Failed: '%s'" % template['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + template = self._poll_job(template, 'template') + return template + + + def register_template(self): + template = self.get_template() + if not template: + self.result['changed'] = True + args = self._get_args() + args['url'] = self.module.params.get('url') + args['format'] = self.module.params.get('format') + args['checksum'] = self.module.params.get('checksum') + args['isextractable'] = self.module.params.get('is_extractable') + args['isrouting'] = self.module.params.get('is_routing') + args['sshkeyenabled'] = self.module.params.get('sshkey_enabled') + args['hypervisor'] = 
self.get_hypervisor() + args['zoneid'] = self.get_zone(key='id') + args['domainid'] = self.get_domain(key='id') + args['account'] = self.get_account(key='name') + args['projectid'] = self.get_project(key='id') + + if not self.module.check_mode: + res = self.cs.registerTemplate(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + template = res['template'] + return template + + + def get_template(self): + args = {} + args['isready'] = self.module.params.get('is_ready') + args['templatefilter'] = self.module.params.get('template_filter') + args['zoneid'] = self.get_zone(key='id') + args['domainid'] = self.get_domain(key='id') + args['account'] = self.get_account(key='name') + args['projectid'] = self.get_project(key='id') + + # if checksum is set, we only look on that. + checksum = self.module.params.get('checksum') + if not checksum: + args['name'] = self.module.params.get('name') + + templates = self.cs.listTemplates(**args) + if templates: + # if checksum is set, we only look on that. 
+ if not checksum: + return templates['template'][0] + else: + for i in templates['template']: + if i['checksum'] == checksum: + return i + return None + + + def remove_template(self): + template = self.get_template() + if template: + self.result['changed'] = True + + args = {} + args['id'] = template['id'] + args['zoneid'] = self.get_zone(key='id') + + if not self.module.check_mode: + res = self.cs.deleteTemplate(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self._poll_job(res, 'template') + return template + + + def get_result(self, template): + if template: + if 'displaytext' in template: + self.result['displaytext'] = template['displaytext'] + if 'name' in template: + self.result['name'] = template['name'] + if 'hypervisor' in template: + self.result['hypervisor'] = template['hypervisor'] + if 'zonename' in template: + self.result['zone'] = template['zonename'] + if 'checksum' in template: + self.result['checksum'] = template['checksum'] + if 'format' in template: + self.result['format'] = template['format'] + if 'isready' in template: + self.result['is_ready'] = template['isready'] + if 'ispublic' in template: + self.result['is_public'] = template['ispublic'] + if 'isfeatured' in template: + self.result['is_featured'] = template['isfeatured'] + if 'isextractable' in template: + self.result['is_extractable'] = template['isextractable'] + # and yes! it is really camelCase! 
+ if 'crossZones' in template: + self.result['cross_zones'] = template['crossZones'] + if 'ostypename' in template: + self.result['os_type'] = template['ostypename'] + if 'templatetype' in template: + self.result['template_type'] = template['templatetype'] + if 'passwordenabled' in template: + self.result['password_enabled'] = template['passwordenabled'] + if 'sshkeyenabled' in template: + self.result['sshkey_enabled'] = template['sshkeyenabled'] + if 'status' in template: + self.result['status'] = template['status'] + if 'created' in template: + self.result['created'] = template['created'] + if 'templatetag' in template: + self.result['template_tag'] = template['templatetag'] + if 'tags' in template: + self.result['tags'] = [] + for tag in template['tags']: + result_tag = {} + result_tag['key'] = tag['key'] + result_tag['value'] = tag['value'] + self.result['tags'].append(result_tag) + if 'domain' in template: + self.result['domain'] = template['domain'] + if 'account' in template: + self.result['account'] = template['account'] + if 'project' in template: + self.result['project'] = template['project'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + displaytext = dict(default=None), + url = dict(default=None), + vm = dict(default=None), + snapshot = dict(default=None), + os_type = dict(default=None), + is_ready = dict(type='bool', choices=BOOLEANS, default=False), + is_public = dict(type='bool', choices=BOOLEANS, default=True), + is_featured = dict(type='bool', choices=BOOLEANS, default=False), + is_dynamically_scalable = dict(type='bool', choices=BOOLEANS, default=False), + is_extractable = dict(type='bool', choices=BOOLEANS, default=False), + is_routing = dict(type='bool', choices=BOOLEANS, default=False), + checksum = dict(default=None), + template_filter = dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']), + hypervisor = 
dict(choices=['KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM'], default=None), + requires_hvm = dict(type='bool', choices=BOOLEANS, default=False), + password_enabled = dict(type='bool', choices=BOOLEANS, default=False), + template_tag = dict(default=None), + sshkey_enabled = dict(type='bool', choices=BOOLEANS, default=False), + format = dict(choices=['QCOW2', 'RAW', 'VHD', 'OVA'], default=None), + details = dict(default=None), + bits = dict(type='int', choices=[ 32, 64 ], default=64), + state = dict(choices=['present', 'absent'], default='present'), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(type='bool', choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + mutually_exclusive = ( + ['url', 'vm'], + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ['format', 'url', 'hypervisor'], + ), + required_one_of = ( + ['url', 'vm'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_tpl = AnsibleCloudStackTemplate(module) + + state = module.params.get('state') + if state in ['absent']: + tpl = acs_tpl.remove_template() + else: + url = module.params.get('url') + if url: + tpl = acs_tpl.register_template() + else: + tpl = acs_tpl.create_template() + + result = acs_tpl.get_result(tpl) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py new file mode 
100644 index 00000000000..24e8a46fa37 --- /dev/null +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -0,0 +1,327 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_vmsnapshot +short_description: Manages VM snapshots on Apache CloudStack based clouds. +description: + - Create, remove and revert VM from snapshots. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + name: + description: + - Unique Name of the snapshot. In CloudStack terms C(displayname). + required: true + aliases: ['displayname'] + vm: + description: + - Name of the virtual machine. + required: true + description: + description: + - Description of the snapshot. + required: false + default: null + snapshot_memory: + description: + - Snapshot memory if set to true. + required: false + default: false + zone: + description: + - Name of the zone in which the VM is in. If not set, default zone is used. + required: false + default: null + project: + description: + - Name of the project the VM is assigned to. + required: false + default: null + state: + description: + - State of the snapshot. + required: false + default: 'present' + choices: [ 'present', 'absent', 'revert' ] + domain: + description: + - Domain the VM snapshot is related to. 
+ required: false + default: null + account: + description: + - Account the VM snapshot is related to. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Create a VM snapshot of disk and memory before an upgrade +- local_action: + module: cs_vmsnapshot + name: Snapshot before upgrade + vm: web-01 + snapshot_memory: yes + +# Revert a VM to a snapshot after a failed upgrade +- local_action: + module: cs_vmsnapshot + name: Snapshot before upgrade + vm: web-01 + state: revert + +# Remove a VM snapshot after successful upgrade +- local_action: + module: cs_vmsnapshot + name: Snapshot before upgrade + vm: web-01 + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the snapshot. + returned: success + type: string + sample: snapshot before update +displayname: + description: displayname of the snapshot. + returned: success + type: string + sample: snapshot before update +created: + description: date of the snapshot. + returned: success + type: string + sample: 2015-03-29T14:57:06+0200 +current: + description: true if snapshot is current + returned: success + type: boolean + sample: True +state: + description: state of the vm snapshot + returned: success + type: string + sample: Allocated +type: + description: type of vm snapshot + returned: success + type: string + sample: DiskAndMemory +description: + description: + description: description of vm snapshot + returned: success + type: string + sample: snapshot brought to you by Ansible +domain: + description: Domain the vm snapshot is related to. + returned: success + type: string + sample: example domain +account: + description: Account the vm snapshot is related to. + returned: success + type: string + sample: example account +project: + description: Name of project the vm snapshot is related to.
+ returned: success + type: string + sample: Production +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackVmSnapshot(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + + + def get_snapshot(self): + args = {} + args['virtualmachineid'] = self.get_vm('id') + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + args['name'] = self.module.params.get('name') + + snapshots = self.cs.listVMSnapshot(**args) + if snapshots: + return snapshots['vmSnapshot'][0] + return None + + + def create_snapshot(self): + snapshot = self.get_snapshot() + if not snapshot: + self.result['changed'] = True + + args = {} + args['virtualmachineid'] = self.get_vm('id') + args['name'] = self.module.params.get('name') + args['description'] = self.module.params.get('description') + args['snapshotmemory'] = self.module.params.get('snapshot_memory') + + if not self.module.check_mode: + res = self.cs.createVMSnapshot(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + snapshot = self._poll_job(res, 'vmsnapshot') + + return snapshot + + + def remove_snapshot(self): + snapshot = self.get_snapshot() + if snapshot: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.deleteVMSnapshot(vmsnapshotid=snapshot['id']) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'vmsnapshot') + return snapshot + + + def revert_vm_to_snapshot(self): + snapshot = self.get_snapshot() + if snapshot: + 
self.result['changed'] = True + + if snapshot['state'] != "Ready": + self.module.fail_json(msg="snapshot state is '%s', not ready, could not revert VM" % snapshot['state']) + + if not self.module.check_mode: + res = self.cs.revertToVMSnapshot(vmsnapshotid=snapshot['id']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'vmsnapshot') + return snapshot + + self.module.fail_json(msg="snapshot not found, could not revert VM") + + + def get_result(self, snapshot): + if snapshot: + if 'displayname' in snapshot: + self.result['displayname'] = snapshot['displayname'] + if 'created' in snapshot: + self.result['created'] = snapshot['created'] + if 'current' in snapshot: + self.result['current'] = snapshot['current'] + if 'state' in snapshot: + self.result['state'] = snapshot['state'] + if 'type' in snapshot: + self.result['type'] = snapshot['type'] + if 'name' in snapshot: + self.result['name'] = snapshot['name'] + if 'description' in snapshot: + self.result['description'] = snapshot['description'] + if 'domain' in snapshot: + self.result['domain'] = snapshot['domain'] + if 'account' in snapshot: + self.result['account'] = snapshot['account'] + if 'project' in snapshot: + self.result['project'] = snapshot['project'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, aliases=['displayname']), + vm = dict(required=True), + description = dict(default=None), + zone = dict(default=None), + snapshot_memory = dict(choices=BOOLEANS, default=False), + state = dict(choices=['present', 'absent', 'revert'], default='present'), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = 
dict(type='int', default=10), + ), + required_together = ( + ['icmp_type', 'icmp_code'], + ['api_key', 'api_secret', 'api_url'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module) + + state = module.params.get('state') + if state in ['revert']: + snapshot = acs_vmsnapshot.revert_vm_to_snapshot() + elif state in ['absent']: + snapshot = acs_vmsnapshot.remove_snapshot() + else: + snapshot = acs_vmsnapshot.create_snapshot() + + result = acs_vmsnapshot.get_result(snapshot) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/google/__init__.py b/cloud/google/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/google/gce_img.py b/cloud/google/gce_img.py index 3b2351b3752..5775a94794d 100644 --- a/cloud/google/gce_img.py +++ b/cloud/google/gce_img.py @@ -78,8 +78,10 @@ options: default: null aliases: [] -requirements: [ "libcloud" ] -author: Peter Tan +requirements: + - "python >= 2.6" + - "apache-libcloud" +author: "Peter Tan (@tanpeter)" ''' EXAMPLES = ''' diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 1ae67bf23c6..bf5fcf3cecf 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -26,7 +26,7 @@ short_description: Manage LXC Containers version_added: 1.8.0 description: - Management of LXC containers -author: Kevin Carter +author: "Kevin Carter (@cloudnull)" options: name: description: @@ -38,6 +38,7 @@ options: - lvm - loop - btrfs + - overlayfs description: - Backend storage type for the container. required: false @@ -112,6 +113,24 @@ options: - Set the log level for a container where *container_log* was set. 
required: false default: INFO + clone_name: + version_added: "2.0" + description: + - Name of the new cloned server. This is only used when state is + clone. + required: false + default: false + clone_snapshot: + version_added: "2.0" + required: false + choices: + - true + - false + description: + - Create a snapshot a container when cloning. This is not supported + by all container storage backends. Enabling this may fail if the + backing store does not support snapshots. + default: false archive: choices: - true @@ -142,14 +161,21 @@ options: - absent - frozen description: - - Start a container right after it's created. + - Define the state of a container. If you clone a container using + `clone_name` the newly cloned container created in a stopped state. + The running container will be stopped while the clone operation is + happening and upon completion of the clone the original container + state will be restored. required: false default: started container_config: description: - list of 'key=value' options to use when configuring a container. required: false -requirements: ['lxc >= 1.0', 'python2-lxc >= 0.1'] +requirements: + - 'lxc >= 1.0 # OS package' + - 'python >= 2.6 # OS Package' + - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc' notes: - Containers must have a unique name. If you attempt to create a container with a name that already exists in the users namespace the module will @@ -169,7 +195,8 @@ notes: creating the archive. - If your distro does not have a package for "python2-lxc", which is a requirement for this module, it can be installed from source at - "https://github.com/lxc/python2-lxc" + "https://github.com/lxc/python2-lxc" or installed via pip using the package + name lxc-python2. 
""" EXAMPLES = """ @@ -203,6 +230,7 @@ EXAMPLES = """ - name: Create filesystem container lxc_container: name: test-container-config + backing_store: dir container_log: true template: ubuntu state: started @@ -216,7 +244,7 @@ EXAMPLES = """ # Create an lvm container, run a complex command in it, add additional # configuration to it, create an archive of it, and finally leave the container # in a frozen state. The container archive will be compressed using bzip2 -- name: Create an lvm container +- name: Create a frozen lvm container lxc_container: name: test-container-lvm container_log: true @@ -241,14 +269,6 @@ EXAMPLES = """ - name: Debug info on container "test-container-lvm" debug: var=lvm_container_info -- name: Get information on a given container. - lxc_container: - name: test-container-config - register: config_container_info - -- name: debug info on container "test-container" - debug: var=config_container_info - - name: Run a command in a container and ensure its in a "stopped" state. lxc_container: name: test-container-started @@ -263,19 +283,19 @@ EXAMPLES = """ container_command: | echo 'hello world.' | tee /opt/frozen -- name: Start a container. +- name: Start a container lxc_container: name: test-container-stopped state: started -- name: Run a command in a container and then restart it. +- name: Run a command in a container and then restart it lxc_container: name: test-container-started state: restarted container_command: | echo 'hello world.' | tee /opt/restarted -- name: Run a complex command within a "running" container. +- name: Run a complex command within a "running" container lxc_container: name: test-container-started container_command: | @@ -295,7 +315,53 @@ EXAMPLES = """ archive: true archive_path: /opt/archives -- name: Destroy a container. +# Create a container using overlayfs, create an archive of it, create a +# snapshot clone of the container and and finally leave the container +# in a frozen state. 
The container archive will be compressed using gzip. +- name: Create an overlayfs container archive and clone it + lxc_container: + name: test-container-overlayfs + container_log: true + template: ubuntu + state: started + backing_store: overlayfs + template_options: --release trusty + clone_snapshot: true + clone_name: test-container-overlayfs-clone-snapshot + archive: true + archive_compression: gzip + register: clone_container_info + +- name: debug info on container "test-container" + debug: var=clone_container_info + +- name: Clone a container using snapshot + lxc_container: + name: test-container-overlayfs-clone-snapshot + backing_store: overlayfs + clone_name: test-container-overlayfs-clone-snapshot2 + clone_snapshot: true + +- name: Create a new container and clone it + lxc_container: + name: test-container-new-archive + backing_store: dir + clone_name: test-container-new-archive-clone + +- name: Archive and clone a container then destroy it + lxc_container: + name: test-container-new-archive + state: absent + clone_name: test-container-new-archive-destroyed-clone + archive: true + archive_compression: gzip + +- name: Start a cloned container. + lxc_container: + name: test-container-new-archive-destroyed-clone + state: started + +- name: Destroy a container lxc_container: name: "{{ item }}" state: absent @@ -305,15 +371,22 @@ EXAMPLES = """ - test-container-frozen - test-container-lvm - test-container-config + - test-container-overlayfs + - test-container-overlayfs-clone + - test-container-overlayfs-clone-snapshot + - test-container-overlayfs-clone-snapshot2 + - test-container-new-archive + - test-container-new-archive-clone + - test-container-new-archive-destroyed-clone """ try: import lxc except ImportError: - msg = 'The lxc module is not importable. Check the requirements.' 
-    print("failed=True msg='%s'" % msg)
-    raise SystemExit(msg)
+    HAS_LXC = False
+else:
+    HAS_LXC = True
 
 
 # LXC_COMPRESSION_MAP is a map of available compression types when creating
@@ -351,6 +424,15 @@ LXC_COMMAND_MAP = {
         'directory': '--dir',
         'zfs_root': '--zfsroot'
         }
+    },
+    'clone': {
+        'variables': {
+            'backing_store': '--backingstore',
+            'lxc_path': '--lxcpath',
+            'fs_size': '--fssize',
+            'name': '--orig',
+            'clone_name': '--new'
+        }
     }
 }
 
@@ -369,6 +451,9 @@ LXC_BACKING_STORE = {
     ],
     'loop': [
         'lv_name', 'vg_name', 'thinpool', 'zfs_root'
+    ],
+    'overlayfs': [
+        'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
     ]
 }
 
@@ -388,7 +473,8 @@ LXC_ANSIBLE_STATES = {
     'stopped': '_stopped',
     'restarted': '_restarted',
     'absent': '_destroyed',
-    'frozen': '_frozen'
+    'frozen': '_frozen',
+    'clone': '_clone'
 }
 
 
@@ -398,7 +484,7 @@
 # home directory of the user that was attached to the container and source
 # that users environment variables by default.
 ATTACH_TEMPLATE = """#!/usr/bin/env bash
-pushd "$(grep $(whoami) /etc/passwd | awk -F':' '{print $6}')"
+pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
if [[ -f ".bashrc" ]];then
    source .bashrc
fi
@@ -439,18 +525,16 @@ def create_script(command):
     f.close()
 
     # Ensure the script is executable.
     os.chmod(script_file, 0755)
 
     # Get temporary directory.
     tempdir = tempfile.gettempdir()
 
     # Output log file.
-    stdout = path.join(tempdir, 'lxc-attach-script.log')
-    stdout_file = open(stdout, 'ab')
+    stdout_file = open(path.join(tempdir, 'lxc-attach-script.log'), 'ab')
 
     # Error log file.
-    stderr = path.join(tempdir, 'lxc-attach-script.err')
-    stderr_file = open(stderr, 'ab')
+    stderr_file = open(path.join(tempdir, 'lxc-attach-script.err'), 'ab')
 
     # Execute the script command.
try: @@ -482,6 +566,7 @@ class LxcContainerManagement(object): self.container_name = self.module.params['name'] self.container = self.get_container_bind() self.archive_info = None + self.clone_info = None def get_container_bind(self): return lxc.Container(name=self.container_name) @@ -502,15 +587,15 @@ class LxcContainerManagement(object): return num @staticmethod - def _container_exists(name): + def _container_exists(container_name): """Check if a container exists. - :param name: Name of the container. + :param container_name: Name of the container. :type: ``str`` :returns: True or False if the container is found. :rtype: ``bol`` """ - if [i for i in lxc.list_containers() if i == name]: + if [i for i in lxc.list_containers() if i == container_name]: return True else: return False @@ -543,6 +628,7 @@ class LxcContainerManagement(object): """ # Remove incompatible storage backend options. + variables = variables.copy() for v in LXC_BACKING_STORE[self.module.params['backing_store']]: variables.pop(v, None) @@ -616,7 +702,7 @@ class LxcContainerManagement(object): # TODO(cloudnull) adjust import when issue has been resolved. import ast options_dict = ast.literal_eval(_container_config) - parsed_options = [i.split('=') for i in options_dict] + parsed_options = [i.split('=', 1) for i in options_dict] config_change = False for key, value in parsed_options: @@ -624,7 +710,7 @@ class LxcContainerManagement(object): for option_line in container_config: # Look for key in config if option_line.startswith(key): - _, _value = option_line.split('=') + _, _value = option_line.split('=', 1) config_value = ' '.join(_value.split()) line_index = container_config.index(option_line) # If the sanitized values don't match replace them @@ -655,6 +741,68 @@ class LxcContainerManagement(object): self._container_startup() self.container.freeze() + def _container_create_clone(self): + """Clone a new LXC container from an existing container. 
+ + This method will clone an existing container to a new container using + the `clone_name` variable as the new container name. The method will + create a container if the container `name` does not exist. + + Note that cloning a container will ensure that the original container + is "stopped" before the clone can be done. Because this operation can + require a state change the method will return the original container + to its prior state upon completion of the clone. + + Once the clone is complete the new container will be left in a stopped + state. + """ + + # Ensure that the state of the original container is stopped + container_state = self._get_state() + if container_state != 'stopped': + self.state_change = True + self.container.stop() + + build_command = [ + self.module.get_bin_path('lxc-clone', True), + ] + + build_command = self._add_variables( + variables_dict=self._get_vars( + variables=LXC_COMMAND_MAP['clone']['variables'] + ), + build_command=build_command + ) + + # Load logging for the instance when creating it. + if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE: + build_command.append('--snapshot') + # Check for backing_store == overlayfs if so force the use of snapshot + # If overlay fs is used and snapshot is unset the clone command will + # fail with an unsupported type. + elif self.module.params.get('backing_store') == 'overlayfs': + build_command.append('--snapshot') + + rc, return_data, err = self._run_command(build_command) + if rc != 0: + message = "Failed executing lxc-clone." + self.failure( + err=err, rc=rc, msg=message, command=' '.join( + build_command + ) + ) + else: + self.state_change = True + # Restore the original state of the origin container if it was + # not in a stopped state. + if container_state == 'running': + self.container.start() + elif container_state == 'frozen': + self.container.start() + self.container.freeze() + + return True + def _create(self): """Create a new LXC container. 
@@ -709,9 +857,9 @@ class LxcContainerManagement(object): rc, return_data, err = self._run_command(build_command) if rc != 0: - msg = "Failed executing lxc-create." + message = "Failed executing lxc-create." self.failure( - err=err, rc=rc, msg=msg, command=' '.join(build_command) + err=err, rc=rc, msg=message, command=' '.join(build_command) ) else: self.state_change = True @@ -751,7 +899,7 @@ class LxcContainerManagement(object): :rtype: ``str`` """ - if self._container_exists(name=self.container_name): + if self._container_exists(container_name=self.container_name): return str(self.container.state).lower() else: return str('absent') @@ -794,7 +942,7 @@ class LxcContainerManagement(object): rc=1, msg='The container [ %s ] failed to start. Check to lxc is' ' available and that the container is in a functional' - ' state.' + ' state.' % self.container_name ) def _check_archive(self): @@ -808,6 +956,23 @@ class LxcContainerManagement(object): 'archive': self._container_create_tar() } + def _check_clone(self): + """Create a compressed archive of a container. + + This will store archive_info in as self.archive_info + """ + + clone_name = self.module.params.get('clone_name') + if clone_name: + if not self._container_exists(container_name=clone_name): + self.clone_info = { + 'cloned': self._container_create_clone() + } + else: + self.clone_info = { + 'cloned': False + } + def _destroyed(self, timeout=60): """Ensure a container is destroyed. @@ -816,12 +981,15 @@ class LxcContainerManagement(object): """ for _ in xrange(timeout): - if not self._container_exists(name=self.container_name): + if not self._container_exists(container_name=self.container_name): break # Check if the container needs to have an archive created. 
self._check_archive() + # Check if the container is to be cloned + self._check_clone() + if self._get_state() != 'stopped': self.state_change = True self.container.stop() @@ -852,7 +1020,7 @@ class LxcContainerManagement(object): """ self.check_count(count=count, method='frozen') - if self._container_exists(name=self.container_name): + if self._container_exists(container_name=self.container_name): self._execute_command() # Perform any configuration updates @@ -871,6 +1039,9 @@ class LxcContainerManagement(object): # Check if the container needs to have an archive created. self._check_archive() + + # Check if the container is to be cloned + self._check_clone() else: self._create() count += 1 @@ -886,7 +1057,7 @@ class LxcContainerManagement(object): """ self.check_count(count=count, method='restart') - if self._container_exists(name=self.container_name): + if self._container_exists(container_name=self.container_name): self._execute_command() # Perform any configuration updates @@ -896,8 +1067,14 @@ class LxcContainerManagement(object): self.container.stop() self.state_change = True + # Run container startup + self._container_startup() + # Check if the container needs to have an archive created. self._check_archive() + + # Check if the container is to be cloned + self._check_clone() else: self._create() count += 1 @@ -913,7 +1090,7 @@ class LxcContainerManagement(object): """ self.check_count(count=count, method='stop') - if self._container_exists(name=self.container_name): + if self._container_exists(container_name=self.container_name): self._execute_command() # Perform any configuration updates @@ -925,6 +1102,9 @@ class LxcContainerManagement(object): # Check if the container needs to have an archive created. 
self._check_archive() + + # Check if the container is to be cloned + self._check_clone() else: self._create() count += 1 @@ -940,7 +1120,7 @@ class LxcContainerManagement(object): """ self.check_count(count=count, method='start') - if self._container_exists(name=self.container_name): + if self._container_exists(container_name=self.container_name): container_state = self._get_state() if container_state == 'running': pass @@ -965,6 +1145,9 @@ class LxcContainerManagement(object): # Check if the container needs to have an archive created. self._check_archive() + + # Check if the container is to be cloned + self._check_clone() else: self._create() count += 1 @@ -1007,18 +1190,18 @@ class LxcContainerManagement(object): all_lvms = [i.split() for i in stdout.splitlines()][1:] return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg] - def _get_vg_free_pe(self, name): + def _get_vg_free_pe(self, vg_name): """Return the available size of a given VG. - :param name: Name of volume. - :type name: ``str`` + :param vg_name: Name of volume. + :type vg_name: ``str`` :returns: size and measurement of an LV :type: ``tuple`` """ build_command = [ 'vgdisplay', - name, + vg_name, '--units', 'g' ] @@ -1027,7 +1210,7 @@ class LxcContainerManagement(object): self.failure( err=err, rc=rc, - msg='failed to read vg %s' % name, + msg='failed to read vg %s' % vg_name, command=' '.join(build_command) ) @@ -1036,17 +1219,17 @@ class LxcContainerManagement(object): _free_pe = free_pe[0].split() return float(_free_pe[-2]), _free_pe[-1] - def _get_lv_size(self, name): + def _get_lv_size(self, lv_name): """Return the available size of a given LV. - :param name: Name of volume. - :type name: ``str`` + :param lv_name: Name of volume. 
+ :type lv_name: ``str`` :returns: size and measurement of an LV :type: ``tuple`` """ vg = self._get_lxc_vg() - lv = os.path.join(vg, name) + lv = os.path.join(vg, lv_name) build_command = [ 'lvdisplay', lv, @@ -1080,7 +1263,7 @@ class LxcContainerManagement(object): """ vg = self._get_lxc_vg() - free_space, messurement = self._get_vg_free_pe(name=vg) + free_space, messurement = self._get_vg_free_pe(vg_name=vg) if free_space < float(snapshot_size_gb): message = ( @@ -1183,25 +1366,25 @@ class LxcContainerManagement(object): return archive_name - def _lvm_lv_remove(self, name): + def _lvm_lv_remove(self, lv_name): """Remove an LV. - :param name: The name of the logical volume - :type name: ``str`` + :param lv_name: The name of the logical volume + :type lv_name: ``str`` """ vg = self._get_lxc_vg() build_command = [ self.module.get_bin_path('lvremove', True), "-f", - "%s/%s" % (vg, name), + "%s/%s" % (vg, lv_name), ] rc, stdout, err = self._run_command(build_command) if rc != 0: self.failure( err=err, rc=rc, - msg='Failed to remove LVM LV %s/%s' % (vg, name), + msg='Failed to remove LVM LV %s/%s' % (vg, lv_name), command=' '.join(build_command) ) @@ -1213,31 +1396,71 @@ class LxcContainerManagement(object): :param temp_dir: path to the temporary local working directory :type temp_dir: ``str`` """ + # This loop is created to support overlayfs archives. This should + # squash all of the layers into a single archive. 
+ fs_paths = container_path.split(':') + if 'overlayfs' in fs_paths: + fs_paths.pop(fs_paths.index('overlayfs')) + + for fs_path in fs_paths: + # Set the path to the container data + fs_path = os.path.dirname(fs_path) + + # Run the sync command + build_command = [ + self.module.get_bin_path('rsync', True), + '-aHAX', + fs_path, + temp_dir + ] + rc, stdout, err = self._run_command( + build_command, + unsafe_shell=True + ) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to perform archive', + command=' '.join(build_command) + ) + + def _unmount(self, mount_point): + """Unmount a file system. + + :param mount_point: path on the file system that is mounted. + :type mount_point: ``str`` + """ build_command = [ - self.module.get_bin_path('rsync', True), - '-aHAX', - container_path, - temp_dir + self.module.get_bin_path('umount', True), + mount_point, ] - rc, stdout, err = self._run_command(build_command, unsafe_shell=True) + rc, stdout, err = self._run_command(build_command) if rc != 0: self.failure( err=err, rc=rc, - msg='failed to perform archive', + msg='failed to unmount [ %s ]' % mount_point, command=' '.join(build_command) ) - def _unmount(self, mount_point): - """Unmount a file system. + def _overlayfs_mount(self, lowerdir, upperdir, mount_point): + """mount an lv. + :param lowerdir: name/path of the lower directory + :type lowerdir: ``str`` + :param upperdir: name/path of the upper directory + :type upperdir: ``str`` :param mount_point: path on the file system that is mounted. 
:type mount_point: ``str`` """ build_command = [ - self.module.get_bin_path('umount', True), + self.module.get_bin_path('mount', True), + '-t overlayfs', + '-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir), + 'overlayfs', mount_point, ] rc, stdout, err = self._run_command(build_command) @@ -1245,8 +1468,8 @@ class LxcContainerManagement(object): self.failure( err=err, rc=rc, - msg='failed to unmount [ %s ]' % mount_point, - command=' '.join(build_command) + msg='failed to mount overlayfs:%s:%s to %s -- Command: %s' + % (lowerdir, upperdir, mount_point, build_command) ) def _container_create_tar(self): @@ -1275,13 +1498,15 @@ class LxcContainerManagement(object): # Test if the containers rootfs is a block device block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev')) + + # Test if the container is using overlayfs + overlayfs_backed = lxc_rootfs.startswith('overlayfs') + mount_point = os.path.join(work_dir, 'rootfs') # Set the snapshot name if needed snapshot_name = '%s_lxc_snapshot' % self.container_name - # Set the path to the container data - container_path = os.path.dirname(lxc_rootfs) container_state = self._get_state() try: # Ensure the original container is stopped or frozen @@ -1292,7 +1517,7 @@ class LxcContainerManagement(object): self.container.stop() # Sync the container data from the container_path to work_dir - self._rsync_data(container_path, temp_dir) + self._rsync_data(lxc_rootfs, temp_dir) if block_backed: if snapshot_name not in self._lvm_lv_list(): @@ -1301,7 +1526,7 @@ class LxcContainerManagement(object): # Take snapshot size, measurement = self._get_lv_size( - name=self.container_name + lv_name=self.container_name ) self._lvm_snapshot_create( source_lv=self.container_name, @@ -1322,25 +1547,33 @@ class LxcContainerManagement(object): ' up old snapshot of containers before continuing.' 
% snapshot_name ) - - # Restore original state of container - if container_state == 'running': - if self._get_state() == 'frozen': - self.container.unfreeze() - else: - self.container.start() + elif overlayfs_backed: + lowerdir, upperdir = lxc_rootfs.split(':')[1:] + self._overlayfs_mount( + lowerdir=lowerdir, + upperdir=upperdir, + mount_point=mount_point + ) # Set the state as changed and set a new fact self.state_change = True return self._create_tar(source_dir=work_dir) finally: - if block_backed: + if block_backed or overlayfs_backed: # unmount snapshot self._unmount(mount_point) + if block_backed: # Remove snapshot self._lvm_lv_remove(snapshot_name) + # Restore original state of container + if container_state == 'running': + if self._get_state() == 'frozen': + self.container.unfreeze() + else: + self.container.start() + # Remove tmpdir shutil.rmtree(temp_dir) @@ -1374,6 +1607,9 @@ class LxcContainerManagement(object): if self.archive_info: outcome.update(self.archive_info) + if self.clone_info: + outcome.update(self.clone_info) + self.module.exit_json( changed=self.state_change, lxc_container=outcome @@ -1450,6 +1686,14 @@ def main(): choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i], default='INFO' ), + clone_name=dict( + type='str', + required=False + ), + clone_snapshot=dict( + choices=BOOLEANS, + default='false' + ), archive=dict( choices=BOOLEANS, default='false' @@ -1466,6 +1710,11 @@ def main(): supports_check_mode=False, ) + if not HAS_LXC: + module.fail_json( + msg='The `lxc` module is not importable. Check the requirements.' 
+ ) + lv_name = module.params.get('lv_name') if not lv_name: module.params['lv_name'] = module.params.get('name') @@ -1477,4 +1726,3 @@ def main(): # import module bits from ansible.module_utils.basic import * main() - diff --git a/cloud/misc/ovirt.py b/cloud/misc/ovirt.py old mode 100755 new mode 100644 index 2d54ad3f401..6e8f3281dc5 --- a/cloud/misc/ovirt.py +++ b/cloud/misc/ovirt.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: ovirt -author: Vincent Van der Kussen +author: "Vincent Van der Kussen (@vincentvdk)" short_description: oVirt/RHEV platform management description: - allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform @@ -152,7 +152,9 @@ options: aliases: [] choices: ['present', 'absent', 'shutdown', 'started', 'restarted'] -requirements: [ "ovirt-engine-sdk" ] +requirements: + - "python >= 2.6" + - "ovirt-engine-sdk-python" ''' EXAMPLES = ''' # Basic example provisioning from image. diff --git a/cloud/misc/proxmox.py b/cloud/misc/proxmox.py new file mode 100644 index 00000000000..7be4361edbe --- /dev/null +++ b/cloud/misc/proxmox.py @@ -0,0 +1,433 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: proxmox +short_description: management of instances in Proxmox VE cluster +description: + - allows you to create/delete/stop instances in Proxmox VE cluster +version_added: "2.0" +options: + api_host: + description: + - the host of the Proxmox VE cluster + required: true + api_user: + description: + - the user to authenticate with + required: true + api_password: + description: + - the password to authenticate with + - you can use PROXMOX_PASSWORD environment variable + default: null + required: false + vmid: + description: + - the instance id + default: null + required: true + validate_certs: + description: + - enable / disable https certificate verification + default: false + required: false + type: boolean + node: + description: + - Proxmox VE node, when new VM will be created + - required only for C(state=present) + - for another states will be autodiscovered + default: null + required: false + password: + description: + - the instance root password + - required only for C(state=present) + default: null + required: false + hostname: + description: + - the instance hostname + - required only for C(state=present) + default: null + required: false + ostemplate: + description: + - the template for VM creating + - required only for C(state=present) + default: null + required: false + disk: + description: + - hard disk size in GB for instance + default: 3 + required: false + cpus: + description: + - numbers of allocated cpus for instance + default: 1 + required: false + memory: + description: + - memory size in MB for instance + default: 512 + required: false + swap: + description: + - swap memory size in MB for instance + default: 0 + required: false + netif: + description: + - specifies network interfaces for the container + default: null + required: false + type: string + ip_address: + description: + - specifies the address the container will be assigned + default: null + required: false + type: string + onboot: + description: 
+ - specifies whether a VM will be started during system bootup + default: false + required: false + type: boolean + storage: + description: + - target storage + default: 'local' + required: false + type: string + cpuunits: + description: + - CPU weight for a VM + default: 1000 + required: false + type: integer + nameserver: + description: + - sets DNS server IP address for a container + default: null + required: false + type: string + searchdomain: + description: + - sets DNS search domain for a container + default: null + required: false + type: string + timeout: + description: + - timeout for operations + default: 30 + required: false + type: integer + force: + description: + - forcing operations + - can be used only with states C(present), C(stopped), C(restarted) + - with C(state=present) force option allow to overwrite existing container + - with states C(stopped) , C(restarted) allow to force stop instance + default: false + required: false + type: boolean + state: + description: + - Indicate desired state of the instance + choices: ['present', 'started', 'absent', 'stopped', 'restarted'] + default: present +notes: + - Requires proxmoxer and requests modules on host. This modules can be installed with pip. 
+requirements: [ "proxmoxer", "requests" ] +author: "Sergei Antipov @UnderGreen" +''' + +EXAMPLES = ''' +# Create new container with minimal options +- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' + +# Create new container with minimal options with force(it will rewrite existing container) +- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' force=yes + +# Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before) +- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' + +# Start container +- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=started + +# Stop container +- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped + +# Stop container with force +- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' force=yes state=stopped + +# Restart container(stopped or mounted container you can't restart) +- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped + +# Remove container +- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=absent +''' + +import os +import time + +try: + from proxmoxer import ProxmoxAPI + HAS_PROXMOXER = True +except ImportError: + HAS_PROXMOXER = False + +def get_instance(proxmox, vmid): + return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ] + +def content_check(proxmox, node, ostemplate, storage): + return [ True for cnt in proxmox.nodes(node).storage(storage).content.get() if cnt['volid'] == ostemplate ] 
+ +def node_check(proxmox, node): + return [ True for nd in proxmox.nodes.get() if nd['node'] == node ] + +def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs): + proxmox_node = proxmox.nodes(node) + taskid = proxmox_node.openvz.create(vmid=vmid, storage=storage, memory=memory, swap=swap, + cpus=cpus, disk=disk, **kwargs) + + while timeout: + if ( proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' + and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK' ): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' + % proxmox_node.tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + +def start_instance(module, proxmox, vm, vmid, timeout): + taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.start.post() + while timeout: + if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' + and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' + % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + +def stop_instance(module, proxmox, vm, vmid, timeout, force): + if force: + taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.shutdown.post(forceStop=1) + else: + taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.shutdown.post() + while timeout: + if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' + and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for stopping VM. 
Last line in task before timeout: %s' + % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + +def umount_instance(module, proxmox, vm, vmid, timeout): + taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.umount.post() + while timeout: + if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' + and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' + % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + +def main(): + module = AnsibleModule( + argument_spec = dict( + api_host = dict(required=True), + api_user = dict(required=True), + api_password = dict(no_log=True), + vmid = dict(required=True), + validate_certs = dict(type='bool', choices=BOOLEANS, default='no'), + node = dict(), + password = dict(no_log=True), + hostname = dict(), + ostemplate = dict(), + disk = dict(type='int', default=3), + cpus = dict(type='int', default=1), + memory = dict(type='int', default=512), + swap = dict(type='int', default=0), + netif = dict(), + ip_address = dict(), + onboot = dict(type='bool', choices=BOOLEANS, default='no'), + storage = dict(default='local'), + cpuunits = dict(type='int', default=1000), + nameserver = dict(), + searchdomain = dict(), + timeout = dict(type='int', default=30), + force = dict(type='bool', choices=BOOLEANS, default='no'), + state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']), + ) + ) + + if not HAS_PROXMOXER: + module.fail_json(msg='proxmoxer required for this module') + + state = module.params['state'] + api_user = module.params['api_user'] + api_host = module.params['api_host'] + api_password = module.params['api_password'] + vmid = module.params['vmid'] + validate_certs = module.params['validate_certs'] + node = 
module.params['node'] + disk = module.params['disk'] + cpus = module.params['cpus'] + memory = module.params['memory'] + swap = module.params['swap'] + storage = module.params['storage'] + timeout = module.params['timeout'] + + # If password not set get it from PROXMOX_PASSWORD env + if not api_password: + try: + api_password = os.environ['PROXMOX_PASSWORD'] + except KeyError, e: + module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') + + try: + proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) + except Exception, e: + module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) + + if state == 'present': + try: + if get_instance(proxmox, vmid) and not module.params['force']: + module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid) + elif not (node and module.params['hostname'] and module.params['password'] and module.params['ostemplate']): + module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm') + elif not node_check(proxmox, node): + module.fail_json(msg="node '%s' not exists in cluster" % node) + elif not content_check(proxmox, node, module.params['ostemplate'], storage): + module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s" + % (module.params['ostemplate'], node, storage)) + + create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, + password = module.params['password'], + hostname = module.params['hostname'], + ostemplate = module.params['ostemplate'], + netif = module.params['netif'], + ip_address = module.params['ip_address'], + onboot = int(module.params['onboot']), + cpuunits = module.params['cpuunits'], + nameserver = module.params['nameserver'], + searchdomain = module.params['searchdomain'], + force = int(module.params['force'])) + + module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, 
module.params['ostemplate'])) + except Exception, e: + module.fail_json(msg="creation of VM %s failed with exception: %s" % ( vmid, e )) + + elif state == 'started': + try: + vm = get_instance(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) + if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'running': + module.exit_json(changed=False, msg="VM %s is already running" % vmid) + + if start_instance(module, proxmox, vm, vmid, timeout): + module.exit_json(changed=True, msg="VM %s started" % vmid) + except Exception, e: + module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e )) + + elif state == 'stopped': + try: + vm = get_instance(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) + + if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted': + if module.params['force']: + if umount_instance(module, proxmox, vm, vmid, timeout): + module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) + else: + module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. 
" + "You can use force option to umount it.") % vmid) + + if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'stopped': + module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid) + + if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']): + module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) + except Exception, e: + module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e )) + + elif state == 'restarted': + try: + vm = get_instance(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) + if ( proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'stopped' + or proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted' ): + module.exit_json(changed=False, msg="VM %s is not running" % vmid) + + if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and + start_instance(module, proxmox, vm, vmid, timeout) ): + module.exit_json(changed=True, msg="VM %s is restarted" % vmid) + except Exception, e: + module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e )) + + elif state == 'absent': + try: + vm = get_instance(proxmox, vmid) + if not vm: + module.exit_json(changed=False, msg="VM %s does not exist" % vmid) + + if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'running': + module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid) + + if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted': + module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." 
% vmid) + + taskid = proxmox.nodes(vm[0]['node']).openvz.delete(vmid) + while timeout: + if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' + and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): + module.exit_json(changed=True, msg="VM %s removed" % vmid) + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' + % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + except Exception, e: + module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e )) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/misc/proxmox_template.py b/cloud/misc/proxmox_template.py new file mode 100644 index 00000000000..7fed47f7260 --- /dev/null +++ b/cloud/misc/proxmox_template.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: proxmox_template +short_description: management of OS templates in Proxmox VE cluster +description: + - allows you to upload/delete templates in Proxmox VE cluster +version_added: "2.0" +options: + api_host: + description: + - the host of the Proxmox VE cluster + required: true + api_user: + description: + - the user to authenticate with + required: true + api_password: + description: + - the password to authenticate with + - you can use PROXMOX_PASSWORD environment variable + default: null + required: false + validate_certs: + description: + - enable / disable https certificate verification + default: false + required: false + type: boolean + node: + description: + - Proxmox VE node, when you will operate with template + default: null + required: true + src: + description: + - path to uploaded file + - required only for C(state=present) + default: null + required: false + aliases: ['path'] + template: + description: + - the template name + - required only for states C(absent), C(info) + default: null + required: false + content_type: + description: + - content type + - required only for C(state=present) + default: 'vztmpl' + required: false + choices: ['vztmpl', 'iso'] + storage: + description: + - target storage + default: 'local' + required: false + type: string + timeout: + description: + - timeout for operations + default: 30 + required: false + type: integer + force: + description: + - can be used only with C(state=present), exists template will be overwritten + default: false + required: false + type: boolean + state: + description: + - Indicate desired state of the template + choices: ['present', 'absent'] + default: present +notes: + - Requires proxmoxer and requests modules on host. This modules can be installed with pip. 
+requirements: [ "proxmoxer", "requests" ] +author: "Sergei Antipov @UnderGreen" +''' + +EXAMPLES = ''' +# Upload new openvz template with minimal options +- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz' + +# Upload new openvz template with minimal options use environment PROXMOX_PASSWORD variable(you should export it before) +- proxmox_template: node='uk-mc02' api_user='root@pam' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz' + +# Upload new openvz template with all options and force overwrite +- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' storage='local' content_type='vztmpl' src='~/ubuntu-14.04-x86_64.tar.gz' force=yes + +# Delete template with minimal options +- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' template='ubuntu-14.04-x86_64.tar.gz' state=absent +''' + +import os +import time + +try: + from proxmoxer import ProxmoxAPI + HAS_PROXMOXER = True +except ImportError: + HAS_PROXMOXER = False + +def get_template(proxmox, node, storage, content_type, template): + return [ True for tmpl in proxmox.nodes(node).storage(storage).content.get() + if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template) ] + +def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout): + taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath)) + while timeout: + task_status = proxmox.nodes(api_host.split('.')[0]).tasks(taskid).status.get() + if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK': + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for uploading template. 
Last line in task before timeout: %s' + % proxmox.nodes(node).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + +def delete_template(module, proxmox, node, storage, content_type, template, timeout): + volid = '%s:%s/%s' % (storage, content_type, template) + proxmox.nodes(node).storage(storage).content.delete(volid) + while timeout: + if not get_template(proxmox, node, storage, content_type, template): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for deleting template.') + + time.sleep(1) + return False + +def main(): + module = AnsibleModule( + argument_spec = dict( + api_host = dict(required=True), + api_user = dict(required=True), + api_password = dict(no_log=True), + validate_certs = dict(type='bool', choices=BOOLEANS, default='no'), + node = dict(), + src = dict(), + template = dict(), + content_type = dict(default='vztmpl', choices=['vztmpl','iso']), + storage = dict(default='local'), + timeout = dict(type='int', default=30), + force = dict(type='bool', choices=BOOLEANS, default='no'), + state = dict(default='present', choices=['present', 'absent']), + ) + ) + + if not HAS_PROXMOXER: + module.fail_json(msg='proxmoxer required for this module') + + state = module.params['state'] + api_user = module.params['api_user'] + api_host = module.params['api_host'] + api_password = module.params['api_password'] + validate_certs = module.params['validate_certs'] + node = module.params['node'] + storage = module.params['storage'] + timeout = module.params['timeout'] + + # If password not set get it from PROXMOX_PASSWORD env + if not api_password: + try: + api_password = os.environ['PROXMOX_PASSWORD'] + except KeyError, e: + module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') + + try: + proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) + except Exception, e: + module.fail_json(msg='authorization on 
proxmox cluster failed with exception: %s' % e) + + if state == 'present': + try: + content_type = module.params['content_type'] + src = module.params['src'] + + from ansible import utils + realpath = utils.path_dwim(None, src) + template = os.path.basename(realpath) + if get_template(proxmox, node, storage, content_type, template) and not module.params['force']: + module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template)) + elif not src: + module.fail_json(msg='src param to uploading template file is mandatory') + elif not (os.path.exists(realpath) and os.path.isfile(realpath)): + module.fail_json(msg='template file on path %s not exists' % realpath) + + if upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout): + module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) + except Exception, e: + module.fail_json(msg="uploading of template %s failed with exception: %s" % ( template, e )) + + elif state == 'absent': + try: + content_type = module.params['content_type'] + template = module.params['template'] + + if not template: + module.fail_json(msg='template param is mandatory') + elif not get_template(proxmox, node, storage, content_type, template): + module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template)) + + if delete_template(module, proxmox, node, storage, content_type, template, timeout): + module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template)) + except Exception, e: + module.fail_json(msg="deleting of template %s failed with exception: %s" % ( template, e )) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/cloud/misc/virt.py b/cloud/misc/virt.py index f1d36fc1964..80b8e2558eb 100644 --- a/cloud/misc/virt.py +++ b/cloud/misc/virt.py @@ -55,8 
+55,13 @@ options: - XML document used with the define command required: false default: null -requirements: [ "libvirt" ] -author: Michael DeHaan, Seth Vidal +requirements: + - "python >= 2.6" + - "libvirt-python" +author: + - "Ansible Core Team" + - "Michael DeHaan" + - "Seth Vidal" ''' EXAMPLES = ''' diff --git a/cloud/rackspace/__init__.py b/cloud/rackspace/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py new file mode 100644 index 00000000000..a3f29e22f50 --- /dev/null +++ b/cloud/rackspace/rax_mon_alarm.py @@ -0,0 +1,227 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_alarm +short_description: Create or delete a Rackspace Cloud Monitoring alarm. +description: +- Create or delete a Rackspace Cloud Monitoring alarm that associates an + existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with + criteria that specify what conditions will trigger which levels of + notifications. 
Rackspace monitoring module flow | rax_mon_entity -> + rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> + *rax_mon_alarm* +version_added: "2.0" +options: + state: + description: + - Ensure that the alarm with this C(label) exists or does not exist. + choices: [ "present", "absent" ] + required: false + default: present + label: + description: + - Friendly name for this alarm, used to achieve idempotence. Must be a String + between 1 and 255 characters long. + required: true + entity_id: + description: + - ID of the entity this alarm is attached to. May be acquired by registering + the value of a rax_mon_entity task. + required: true + check_id: + description: + - ID of the check that should be alerted on. May be acquired by registering + the value of a rax_mon_check task. + required: true + notification_plan_id: + description: + - ID of the notification plan to trigger if this alarm fires. May be acquired + by registering the value of a rax_mon_notification_plan task. + required: true + criteria: + description: + - Alarm DSL that describes alerting conditions and their output states. Must + be between 1 and 16384 characters long. See + http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html + for a reference on the alerting language. + disabled: + description: + - If yes, create this alarm, but leave it in an inactive state. Defaults to + no. + choices: [ "yes", "no" ] + metadata: + description: + - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String + keys and values between 1 and 255 characters long. +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Alarm example + gather_facts: False + hosts: local + connection: local + tasks: + - name: Ensure that a specific alarm exists. 
+ rax_mon_alarm: + credentials: ~/.rax_pub + state: present + label: uhoh + entity_id: "{{ the_entity['entity']['id'] }}" + check_id: "{{ the_check['check']['id'] }}" + notification_plan_id: "{{ defcon1['notification_plan']['id'] }}" + criteria: > + if (rate(metric['average']) > 10) { + return new AlarmStatus(WARNING); + } + return new AlarmStatus(OK); + register: the_alarm +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria, + disabled, metadata): + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + if criteria and len(criteria) < 1 or len(criteria) > 16384: + module.fail_json(msg='criteria must be between 1 and 16384 characters long') + + # Coerce attributes. + + changed = False + alarm = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [a for a in cm.list_alarms(entity_id) if a.label == label] + + if existing: + alarm = existing[0] + + if state == 'present': + should_create = False + should_update = False + should_delete = False + + if len(existing) > 1: + module.fail_json(msg='%s existing alarms have the label %s.' 
% + (len(existing), label)) + + if alarm: + if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id: + should_delete = should_create = True + + should_update = (disabled and disabled != alarm.disabled) or \ + (metadata and metadata != alarm.metadata) or \ + (criteria and criteria != alarm.criteria) + + if should_update and not should_delete: + cm.update_alarm(entity=entity_id, alarm=alarm, + criteria=criteria, disabled=disabled, + label=label, metadata=metadata) + changed = True + + if should_delete: + alarm.delete() + changed = True + else: + should_create = True + + if should_create: + alarm = cm.create_alarm(entity=entity_id, check=check_id, + notification_plan=notification_plan_id, + criteria=criteria, disabled=disabled, label=label, + metadata=metadata) + changed = True + else: + for a in existing: + a.delete() + changed = True + + if alarm: + alarm_dict = { + "id": alarm.id, + "label": alarm.label, + "check_id": alarm.check_id, + "notification_plan_id": alarm.notification_plan_id, + "criteria": alarm.criteria, + "disabled": alarm.disabled, + "metadata": alarm.metadata + } + module.exit_json(changed=changed, alarm=alarm_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + entity_id=dict(required=True), + check_id=dict(required=True), + notification_plan_id=dict(required=True), + criteria=dict(), + disabled=dict(type='bool', default=False), + metadata=dict(type='dict') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + label = module.params.get('label') + entity_id = module.params.get('entity_id') + check_id = module.params.get('check_id') + notification_plan_id = 
module.params.get('notification_plan_id') + criteria = module.params.get('criteria') + disabled = module.boolean(module.params.get('disabled')) + metadata = module.params.get('metadata') + + setup_rax_module(module, pyrax) + + alarm(module, state, label, entity_id, check_id, notification_plan_id, + criteria, disabled, metadata) + + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. +main() diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py new file mode 100644 index 00000000000..14b86864e2f --- /dev/null +++ b/cloud/rackspace/rax_mon_check.py @@ -0,0 +1,313 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_check +short_description: Create or delete a Rackspace Cloud Monitoring check for an + existing entity. +description: +- Create or delete a Rackspace Cloud Monitoring check associated with an + existing rax_mon_entity. A check is a specific test or measurement that is + performed, possibly from different monitoring zones, on the systems you + monitor. 
Rackspace monitoring module flow | rax_mon_entity -> + *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> + rax_mon_alarm +version_added: "2.0" +options: + state: + description: + - Ensure that a check with this C(label) exists or does not exist. + choices: ["present", "absent"] + entity_id: + description: + - ID of the rax_mon_entity to target with this check. + required: true + label: + description: + - Defines a label for this check, between 1 and 64 characters long. + required: true + check_type: + description: + - The type of check to create. C(remote.) checks may be created on any + rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities + that have a non-null C(agent_id). + choices: + - remote.dns + - remote.ftp-banner + - remote.http + - remote.imap-banner + - remote.mssql-banner + - remote.mysql-banner + - remote.ping + - remote.pop3-banner + - remote.postgresql-banner + - remote.smtp-banner + - remote.smtp + - remote.ssh + - remote.tcp + - remote.telnet-banner + - agent.filesystem + - agent.memory + - agent.load_average + - agent.cpu + - agent.disk + - agent.network + - agent.plugin + required: true + monitoring_zones_poll: + description: + - Comma-separated list of the names of the monitoring zones the check should + run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon, + mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks. + target_hostname: + description: + - One of `target_hostname` and `target_alias` is required for remote.* checks, + but prohibited for agent.* checks. The hostname this check should target. + Must be a valid IPv4, IPv6, or FQDN. + target_alias: + description: + - One of `target_alias` and `target_hostname` is required for remote.* checks, + but prohibited for agent.* checks. Use the corresponding key in the entity's + `ip_addresses` hash to resolve an IP address to target. + details: + description: + - Additional details specific to the check type. 
Must be a hash of strings + between 1 and 255 characters long, or an array or object containing 0 to + 256 items. + disabled: + description: + - If "yes", ensure the check is created, but don't actually use it yet. + choices: [ "yes", "no" ] + metadata: + description: + - Hash of arbitrary key-value pairs to accompany this check if it fires. + Keys and values must be strings between 1 and 255 characters long. + period: + description: + - The number of seconds between each time the check is performed. Must be + greater than the minimum period set on your account. + timeout: + description: + - The number of seconds this check will wait when attempting to collect + results. Must be less than the period. +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Create a monitoring check + gather_facts: False + hosts: local + connection: local + tasks: + - name: Associate a check with an existing entity. + rax_mon_check: + credentials: ~/.rax_pub + state: present + entity_id: "{{ the_entity['entity']['id'] }}" + label: the_check + check_type: remote.ping + monitoring_zones_poll: mziad,mzord,mzdfw + details: + count: 10 + meta: + hurf: durf + register: the_check +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def cloud_check(module, state, entity_id, label, check_type, + monitoring_zones_poll, target_hostname, target_alias, details, + disabled, metadata, period, timeout): + + # Coerce attributes. + + if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list): + monitoring_zones_poll = [monitoring_zones_poll] + + if period: + period = int(period) + + if timeout: + timeout = int(timeout) + + changed = False + check = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. 
This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + entity = cm.get_entity(entity_id) + if not entity: + module.fail_json(msg='Failed to instantiate entity. "%s" may not be' + ' a valid entity id.' % entity_id) + + existing = [e for e in entity.list_checks() if e.label == label] + + if existing: + check = existing[0] + + if state == 'present': + if len(existing) > 1: + module.fail_json(msg='%s existing checks have a label of %s.' % + (len(existing), label)) + + should_delete = False + should_create = False + should_update = False + + if check: + # Details may include keys set to default values that are not + # included in the initial creation. + # + # Only force a recreation of the check if one of the *specified* + # keys is missing or has a different value. + if details: + for (key, value) in details.iteritems(): + if key not in check.details: + should_delete = should_create = True + elif value != check.details[key]: + should_delete = should_create = True + + should_update = label != check.label or \ + (target_hostname and target_hostname != check.target_hostname) or \ + (target_alias and target_alias != check.target_alias) or \ + (disabled != check.disabled) or \ + (metadata and metadata != check.metadata) or \ + (period and period != check.period) or \ + (timeout and timeout != check.timeout) or \ + (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll) + + if should_update and not should_delete: + check.update(label=label, + disabled=disabled, + metadata=metadata, + monitoring_zones_poll=monitoring_zones_poll, + timeout=timeout, + period=period, + target_alias=target_alias, + target_hostname=target_hostname) + changed = True + else: + # The check doesn't exist yet. 
+ should_create = True + + if should_delete: + check.delete() + + if should_create: + check = cm.create_check(entity, + label=label, + check_type=check_type, + target_hostname=target_hostname, + target_alias=target_alias, + monitoring_zones_poll=monitoring_zones_poll, + details=details, + disabled=disabled, + metadata=metadata, + period=period, + timeout=timeout) + changed = True + elif state == 'absent': + if check: + check.delete() + changed = True + else: + module.fail_json(msg='state must be either present or absent.') + + if check: + check_dict = { + "id": check.id, + "label": check.label, + "type": check.type, + "target_hostname": check.target_hostname, + "target_alias": check.target_alias, + "monitoring_zones_poll": check.monitoring_zones_poll, + "details": check.details, + "disabled": check.disabled, + "metadata": check.metadata, + "period": check.period, + "timeout": check.timeout + } + module.exit_json(changed=changed, check=check_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + entity_id=dict(required=True), + label=dict(required=True), + check_type=dict(required=True), + monitoring_zones_poll=dict(), + target_hostname=dict(), + target_alias=dict(), + details=dict(type='dict', default={}), + disabled=dict(type='bool', default=False), + metadata=dict(type='dict', default={}), + period=dict(type='int'), + timeout=dict(type='int'), + state=dict(default='present', choices=['present', 'absent']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + entity_id = module.params.get('entity_id') + label = module.params.get('label') + check_type = module.params.get('check_type') + monitoring_zones_poll = module.params.get('monitoring_zones_poll') + target_hostname = module.params.get('target_hostname') + target_alias = 
module.params.get('target_alias') + details = module.params.get('details') + disabled = module.boolean(module.params.get('disabled')) + metadata = module.params.get('metadata') + period = module.params.get('period') + timeout = module.params.get('timeout') + + state = module.params.get('state') + + setup_rax_module(module, pyrax) + + cloud_check(module, state, entity_id, label, check_type, + monitoring_zones_poll, target_hostname, target_alias, details, + disabled, metadata, period, timeout) + + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. +main() diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py new file mode 100644 index 00000000000..f5f142d2165 --- /dev/null +++ b/cloud/rackspace/rax_mon_entity.py @@ -0,0 +1,192 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_entity +short_description: Create or delete a Rackspace Cloud Monitoring entity +description: +- Create or delete a Rackspace Cloud Monitoring entity, which represents a device + to monitor. 
Entities associate checks and alarms with a target system and + provide a convenient, centralized place to store IP addresses. Rackspace + monitoring module flow | *rax_mon_entity* -> rax_mon_check -> + rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm +version_added: "2.0" +options: + label: + description: + - Defines a name for this entity. Must be a non-empty string between 1 and + 255 characters long. + required: true + state: + description: + - Ensure that an entity with this C(name) exists or does not exist. + choices: ["present", "absent"] + agent_id: + description: + - Rackspace monitoring agent on the target device to which this entity is + bound. Necessary to collect C(agent.) rax_mon_checks against this entity. + named_ip_addresses: + description: + - Hash of IP addresses that may be referenced by name by rax_mon_checks + added to this entity. Must be a dictionary of with keys that are names + between 1 and 64 characters long, and values that are valid IPv4 or IPv6 + addresses. + metadata: + description: + - Hash of arbitrary C(name), C(value) pairs that are passed to associated + rax_mon_alarms. Names and values must all be between 1 and 255 characters + long. 
+author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Entity example + gather_facts: False + hosts: local + connection: local + tasks: + - name: Ensure an entity exists + rax_mon_entity: + credentials: ~/.rax_pub + state: present + label: my_entity + named_ip_addresses: + web_box: 192.168.0.10 + db_box: 192.168.0.11 + meta: + hurf: durf + register: the_entity +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, + metadata): + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + changed = False + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for entity in cm.list_entities(): + if label == entity.label: + existing.append(entity) + + entity = None + + if existing: + entity = existing[0] + + if state == 'present': + should_update = False + should_delete = False + should_create = False + + if len(existing) > 1: + module.fail_json(msg='%s existing entities have the label %s.' % + (len(existing), label)) + + if entity: + if named_ip_addresses and named_ip_addresses != entity.ip_addresses: + should_delete = should_create = True + + # Change an existing Entity, unless there's nothing to do. + should_update = agent_id and agent_id != entity.agent_id or \ + (metadata and metadata != entity.metadata) + + if should_update and not should_delete: + entity.update(agent_id, metadata) + changed = True + + if should_delete: + entity.delete() + else: + should_create = True + + if should_create: + # Create a new Entity. + entity = cm.create_entity(label=label, agent=agent_id, + ip_addresses=named_ip_addresses, + metadata=metadata) + changed = True + else: + # Delete the existing Entities. 
+ for e in existing: + e.delete() + changed = True + + if entity: + entity_dict = { + "id": entity.id, + "name": entity.name, + "agent_id": entity.agent_id, + } + module.exit_json(changed=changed, entity=entity_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + agent_id=dict(), + named_ip_addresses=dict(type='dict', default={}), + metadata=dict(type='dict', default={}) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + agent_id = module.params.get('agent_id') + named_ip_addresses = module.params.get('named_ip_addresses') + metadata = module.params.get('metadata') + + setup_rax_module(module, pyrax) + + cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata) + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. +main() diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py new file mode 100644 index 00000000000..d7b6692dc2c --- /dev/null +++ b/cloud/rackspace/rax_mon_notification.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_notification +short_description: Create or delete a Rackspace Cloud Monitoring notification. +description: +- Create or delete a Rackspace Cloud Monitoring notification that specifies a + channel that can be used to communicate alarms, such as email, webhooks, or + PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> + *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm +version_added: "2.0" +options: + state: + description: + - Ensure that the notification with this C(label) exists or does not exist. + choices: ['present', 'absent'] + label: + description: + - Defines a friendly name for this notification. String between 1 and 255 + characters long. + required: true + notification_type: + description: + - A supported notification type. + choices: ["webhook", "email", "pagerduty"] + required: true + details: + description: + - Dictionary of key-value pairs used to initialize the notification. + Required keys and meanings vary with notification type. See + http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/ + service-notification-types-crud.html for details. + required: true +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Monitoring notification example + gather_facts: False + hosts: local + connection: local + tasks: + - name: Email me when something goes wrong. 
+ rax_mon_entity: + credentials: ~/.rax_pub + label: omg + type: email + details: + address: me@mailhost.com + register: the_notification +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def notification(module, state, label, notification_type, details): + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + changed = False + notification = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for n in cm.list_notifications(): + if n.label == label: + existing.append(n) + + if existing: + notification = existing[0] + + if state == 'present': + should_update = False + should_delete = False + should_create = False + + if len(existing) > 1: + module.fail_json(msg='%s existing notifications are labelled %s.' % + (len(existing), label)) + + if notification: + should_delete = (notification_type != notification.type) + + should_update = (details != notification.details) + + if should_update and not should_delete: + notification.update(details=notification.details) + changed = True + + if should_delete: + notification.delete() + else: + should_create = True + + if should_create: + notification = cm.create_notification(notification_type, + label=label, details=details) + changed = True + else: + for n in existing: + n.delete() + changed = True + + if notification: + notification_dict = { + "id": notification.id, + "type": notification.type, + "label": notification.label, + "details": notification.details + } + module.exit_json(changed=changed, notification=notification_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + 
notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']), + details=dict(required=True, type='dict') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + notification_type = module.params.get('notification_type') + details = module.params.get('details') + + setup_rax_module(module, pyrax) + + notification(module, state, label, notification_type, details) + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. +main() diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py new file mode 100644 index 00000000000..5bb3fa1652a --- /dev/null +++ b/cloud/rackspace/rax_mon_notification_plan.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_notification_plan +short_description: Create or delete a Rackspace Cloud Monitoring notification + plan. 
+description: +- Create or delete a Rackspace Cloud Monitoring notification plan by + associating existing rax_mon_notifications with severity levels. Rackspace + monitoring module flow | rax_mon_entity -> rax_mon_check -> + rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm +version_added: "2.0" +options: + state: + description: + - Ensure that the notification plan with this C(label) exists or does not + exist. + choices: ['present', 'absent'] + label: + description: + - Defines a friendly name for this notification plan. String between 1 and + 255 characters long. + required: true + critical_state: + description: + - Notification list to use when the alarm state is CRITICAL. Must be an + array of valid rax_mon_notification ids. + warning_state: + description: + - Notification list to use when the alarm state is WARNING. Must be an array + of valid rax_mon_notification ids. + ok_state: + description: + - Notification list to use when the alarm state is OK. Must be an array of + valid rax_mon_notification ids. +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Example notification plan + gather_facts: False + hosts: local + connection: local + tasks: + - name: Establish who gets called when. + rax_mon_notification_plan: + credentials: ~/.rax_pub + state: present + label: defcon1 + critical_state: + - "{{ everyone['notification']['id'] }}" + warning_state: + - "{{ opsfloor['notification']['id'] }}" + register: defcon1 +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def notification_plan(module, state, label, critical_state, warning_state, ok_state): + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + changed = False + notification_plan = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. 
This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for n in cm.list_notification_plans(): + if n.label == label: + existing.append(n) + + if existing: + notification_plan = existing[0] + + if state == 'present': + should_create = False + should_delete = False + + if len(existing) > 1: + module.fail_json(msg='%s notification plans are labelled %s.' % + (len(existing), label)) + + if notification_plan: + should_delete = (critical_state and critical_state != notification_plan.critical_state) or \ + (warning_state and warning_state != notification_plan.warning_state) or \ + (ok_state and ok_state != notification_plan.ok_state) + + if should_delete: + notification_plan.delete() + should_create = True + else: + should_create = True + + if should_create: + notification_plan = cm.create_notification_plan(label=label, + critical_state=critical_state, + warning_state=warning_state, + ok_state=ok_state) + changed = True + else: + for np in existing: + np.delete() + changed = True + + if notification_plan: + notification_plan_dict = { + "id": notification_plan.id, + "critical_state": notification_plan.critical_state, + "warning_state": notification_plan.warning_state, + "ok_state": notification_plan.ok_state, + "metadata": notification_plan.metadata + } + module.exit_json(changed=changed, notification_plan=notification_plan_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + critical_state=dict(type='list'), + warning_state=dict(type='list'), + ok_state=dict(type='list') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = 
module.params.get('label') + critical_state = module.params.get('critical_state') + warning_state = module.params.get('warning_state') + ok_state = module.params.get('ok_state') + + setup_rax_module(module, pyrax) + + notification_plan(module, state, label, critical_state, warning_state, ok_state) + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. +main() diff --git a/cloud/vmware/__init__.py b/cloud/vmware/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/vmware/vmware_datacenter.py b/cloud/vmware/vmware_datacenter.py new file mode 100644 index 00000000000..b2083222ed5 --- /dev/null +++ b/cloud/vmware/vmware_datacenter.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: vmware_datacenter +short_description: Manage VMware vSphere Datacenters +description: + - Manage VMware vSphere Datacenters +version_added: 2.0 +author: "Joseph Callen (@jcpowermac)" +notes: + - Tested on vSphere 5.5 +requirements: + - "python >= 2.6" + - PyVmomi +options: + hostname: + description: + - The hostname or IP address of the vSphere vCenter API server + required: True + username: + description: + - The username of the vSphere vCenter + required: True + aliases: ['user', 'admin'] + password: + description: + - The password of the vSphere vCenter + required: True + aliases: ['pass', 'pwd'] + datacenter_name: + description: + - The name of the datacenter the cluster will be created in. + required: True + state: + description: + - If the datacenter should be present or absent + choices: ['present', 'absent'] + required: True +''' + +EXAMPLES = ''' +# Example vmware_datacenter command from Ansible Playbooks +- name: Create Datacenter + local_action: > + vmware_datacenter + hostname="{{ ansible_ssh_host }}" username=root password=vmware + datacenter_name="datacenter" +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def state_create_datacenter(module): + datacenter_name = module.params['datacenter_name'] + content = module.params['content'] + changed = True + datacenter = None + + folder = content.rootFolder + + try: + if not module.check_mode: + datacenter = folder.CreateDatacenter(name=datacenter_name) + module.exit_json(changed=changed, result=str(datacenter)) + except vim.fault.DuplicateName: + module.fail_json(msg="A datacenter with the name %s already exists" % datacenter_name) + except vim.fault.InvalidName: + module.fail_json(msg="%s is an invalid name for a cluster" % datacenter_name) + except vmodl.fault.NotSupported: + # This should never happen + module.fail_json(msg="Trying to create a datacenter on an incorrect folder object") + except 
vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + + +def check_datacenter_state(module): + datacenter_name = module.params['datacenter_name'] + + try: + content = connect_to_api(module) + datacenter = find_datacenter_by_name(content, datacenter_name) + module.params['content'] = content + + if datacenter is None: + return 'absent' + else: + module.params['datacenter'] = datacenter + return 'present' + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + + +def state_destroy_datacenter(module): + datacenter = module.params['datacenter'] + changed = True + result = None + + try: + if not module.check_mode: + task = datacenter.Destroy_Task() + changed, result = wait_for_task(task) + module.exit_json(changed=changed, result=result) + except vim.fault.VimFault as vim_fault: + module.fail_json(msg=vim_fault.msg) + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + + +def state_exit_unchanged(module): + module.exit_json(changed=False) + + +def main(): + + argument_spec = vmware_argument_spec() + argument_spec.update( + dict( + datacenter_name=dict(required=True, type='str'), + state=dict(required=True, choices=['present', 'absent'], type='str'), + ) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + datacenter_states = { + 'absent': { + 'present': state_destroy_datacenter, + 'absent': state_exit_unchanged, + }, + 'present': { + 'present': state_exit_unchanged, + 'absent': state_create_datacenter, + } + } + desired_state = module.params['state'] + current_state = check_datacenter_state(module) + + 
datacenter_states[desired_state][current_state](module) + + +from ansible.module_utils.basic import * +from ansible.module_utils.vmware import * + +if __name__ == '__main__': + main() diff --git a/cloud/vmware/vsphere_copy.py b/cloud/vmware/vsphere_copy.py new file mode 100644 index 00000000000..7c044a7d51a --- /dev/null +++ b/cloud/vmware/vsphere_copy.py @@ -0,0 +1,151 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2015 Dag Wieers +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: vsphere_copy +short_description: Copy a file to a vCenter datastore +description: Upload files to a vCenter datastore +version_added: 2.0 +author: Dag Wieers (@dagwieers) +options: + host: + description: + - The vCenter server on which the datastore is available. + required: true + login: + description: + - The login name to authenticate on the vCenter server. + required: true + password: + description: + - The password to authenticate on the vCenter server. + required: true + src: + description: + - The file to push to vCenter + required: true + datacenter: + description: + - The datacenter on the vCenter server that holds the datastore. + required: true + datastore: + description: + - The datastore on the vCenter server to push files to. + required: true + path: + description: + - The file to push to the datastore on the vCenter server. 
+ required: true +notes: + - "This module ought to be run from a system that can access vCenter directly and has the file to transfer. + It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)." + - Tested on vSphere 5.5 +''' + +EXAMPLES = ''' +- vsphere_copy: host=vhost login=vuser password=vpass src=/some/local/file datacenter='DC1 Someplace' datastore=datastore1 path=some/remote/file + transport: local +- vsphere_copy: host=vhost login=vuser password=vpass src=/other/local/file datacenter='DC2 Someplace' datastore=datastore2 path=other/remote/file + delegate_to: other_system +''' + +import atexit +import base64 +import httplib +import urllib +import mmap +import errno +import socket + +def vmware_path(datastore, datacenter, path): + ''' Constructs a URL path that VSphere accepts reliably ''' + path = "/folder/%s" % path.lstrip("/") + if not path.startswith("/"): + path = "/" + path + params = dict( dsName = datastore ) + if datacenter: + params["dcPath"] = datacenter + params = urllib.urlencode(params) + return "%s?%s" % (path, params) + +def main(): + + module = AnsibleModule( + argument_spec = dict( + host = dict(required=True, aliases=[ 'hostname' ]), + login = dict(required=True, aliases=[ 'username' ]), + password = dict(required=True), + src = dict(required=True, aliases=[ 'name' ]), + datacenter = dict(required=True), + datastore = dict(required=True), + dest = dict(required=True, aliases=[ 'path' ]), + ), + # Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable + supports_check_mode = False, + ) + + host = module.params.get('host') + login = module.params.get('login') + password = module.params.get('password') + src = module.params.get('src') + datacenter = module.params.get('datacenter') + datastore = module.params.get('datastore') + dest = module.params.get('dest') + + fd = open(src, "rb") + atexit.register(fd.close) + + data = mmap.mmap(fd.fileno(), 0, 
access=mmap.ACCESS_READ) + atexit.register(data.close) + + conn = httplib.HTTPSConnection(host) + atexit.register(conn.close) + + remote_path = vmware_path(datastore, datacenter, dest) + auth = base64.encodestring('%s:%s' % (login, password)).rstrip() + headers = { + "Content-Type": "application/octet-stream", + "Content-Length": str(len(data)), + "Authorization": "Basic %s" % auth, + } + + # URL is only used in JSON output (helps troubleshooting) + url = 'https://%s%s' % (host, remote_path) + + try: + conn.request("PUT", remote_path, body=data, headers=headers) + except socket.error, e: + if isinstance(e.args, tuple) and e[0] == errno.ECONNRESET: + # VSphere resets connection if the file is in use and cannot be replaced + module.fail_json(msg='Failed to upload, image probably in use', status=e[0], reason=str(e), url=url) + else: + module.fail_json(msg=str(e), status=e[0], reason=str(e), url=url) + + resp = conn.getresponse() + + if resp.status in range(200, 300): + module.exit_json(changed=True, status=resp.status, reason=resp.reason, url=url) + else: + module.fail_json(msg='Failed to upload', status=resp.status, reason=resp.reason, length=resp.length, version=resp.version, headers=resp.getheaders(), chunked=resp.chunked, url=url) + +# this is magic, see lib/ansible/module_common.py +#<> +main() diff --git a/cloud/webfaction/__init__.py b/cloud/webfaction/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py new file mode 100644 index 00000000000..3e42ec1265e --- /dev/null +++ b/cloud/webfaction/webfaction_app.py @@ -0,0 +1,180 @@ +#! 
/usr/bin/python +# +# Create a Webfaction application using Ansible and the Webfaction API +# +# Valid application types can be found by looking here: +# http://docs.webfaction.com/xmlrpc-api/apps.html#application-types +# +# ------------------------------------------ +# +# (c) Quentin Stafford-Fraser 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: webfaction_app +short_description: Add or remove applications on a Webfaction host +description: + - Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser (@quentinsf) +version_added: "2.0" +notes: + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. + +options: + name: + description: + - The name of the application + required: true + + state: + description: + - Whether the application should exist + required: false + choices: ['present', 'absent'] + default: "present" + + type: + description: + - The type of application to create. 
See the Webfaction docs at http://docs.webfaction.com/xmlrpc-api/apps.html for a list. + required: true + + autostart: + description: + - Whether the app should restart with an autostart.cgi script + required: false + default: "no" + + extra_info: + description: + - Any extra parameters required by the app + required: false + default: null + + open_port: + required: false + default: false + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +EXAMPLES = ''' + - name: Create a test app + webfaction_app: + name="my_wsgi_app1" + state=present + type=mod_wsgi35-python27 + login_name={{webfaction_user}} + login_password={{webfaction_passwd}} +''' + +import xmlrpclib + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(required=False, choices=['present', 'absent'], default='present'), + type = dict(required=True), + autostart = dict(required=False, choices=BOOLEANS, default=False), + extra_info = dict(required=False, default=""), + port_open = dict(required=False, choices=BOOLEANS, default=False), + login_name = dict(required=True), + login_password = dict(required=True), + ), + supports_check_mode=True + ) + app_name = module.params['name'] + app_type = module.params['type'] + app_state = module.params['state'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + app_list = webfaction.list_apps(session_id) + app_map = dict([(i['name'], i) for i in app_list]) + existing_app = app_map.get(app_name) + + result = {} + + # Here's where the real stuff happens + + if app_state == 'present': + + # Does an app with this name already exist? + if existing_app: + if existing_app['type'] != app_type: + module.fail_json(msg="App already exists with different type. 
Please fix by hand.") + + # If it exists with the right type, we don't change it + # Should check other parameters. + module.exit_json( + changed = False, + ) + + if not module.check_mode: + # If this isn't a dry run, create the app + result.update( + webfaction.create_app( + session_id, app_name, app_type, + module.boolean(module.params['autostart']), + module.params['extra_info'], + module.boolean(module.params['port_open']) + ) + ) + + elif app_state == 'absent': + + # If the app's already not there, nothing changed. + if not existing_app: + module.exit_json( + changed = False, + ) + + if not module.check_mode: + # If this isn't a dry run, delete the app + result.update( + webfaction.delete_app(session_id, app_name) + ) + + else: + module.fail_json(msg="Unknown state specified: {}".format(app_state)) + + + module.exit_json( + changed = True, + result = result + ) + +from ansible.module_utils.basic import * +main() + diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py new file mode 100644 index 00000000000..f420490711c --- /dev/null +++ b/cloud/webfaction/webfaction_db.py @@ -0,0 +1,184 @@ +#! /usr/bin/python +# +# Create a webfaction database using Ansible and the Webfaction API +# +# ------------------------------------------ +# +# (c) Quentin Stafford-Fraser and Andy Baker 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +DOCUMENTATION = ''' +--- +module: webfaction_db +short_description: Add or remove a database on Webfaction +description: + - Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser (@quentinsf) +version_added: "2.0" +notes: + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. +options: + + name: + description: + - The name of the database + required: true + + state: + description: + - Whether the database should exist + required: false + choices: ['present', 'absent'] + default: "present" + + type: + description: + - The type of database to create. + required: true + choices: ['mysql', 'postgresql'] + + password: + description: + - The password for the new database user. + required: false + default: None + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +EXAMPLES = ''' + # This will also create a default DB user with the same + # name as the database, and the specified password. + + - name: Create a database + webfaction_db: + name: "{{webfaction_user}}_db1" + password: mytestsql + type: mysql + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" + + # Note that, for symmetry's sake, deleting a database using + # 'state: absent' will also delete the matching user. 
+ +''' + +import socket +import xmlrpclib + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(required=False, choices=['present', 'absent'], default='present'), + # You can specify an IP address or hostname. + type = dict(required=True), + password = dict(required=False, default=None), + login_name = dict(required=True), + login_password = dict(required=True), + ), + supports_check_mode=True + ) + db_name = module.params['name'] + db_state = module.params['state'] + db_type = module.params['type'] + db_passwd = module.params['password'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + db_list = webfaction.list_dbs(session_id) + db_map = dict([(i['name'], i) for i in db_list]) + existing_db = db_map.get(db_name) + + user_list = webfaction.list_db_users(session_id) + user_map = dict([(i['username'], i) for i in user_list]) + existing_user = user_map.get(db_name) + + result = {} + + # Here's where the real stuff happens + + if db_state == 'present': + + # Does an database with this name already exist? + if existing_db: + # Yes, but of a different type - fail + if existing_db['db_type'] != db_type: + module.fail_json(msg="Database already exists but is a different type. Please fix by hand.") + + # If it exists with the right type, we don't change anything. + module.exit_json( + changed = False, + ) + + + if not module.check_mode: + # If this isn't a dry run, create the db + # and default user. + result.update( + webfaction.create_db( + session_id, db_name, db_type, db_passwd + ) + ) + + elif db_state == 'absent': + + # If this isn't a dry run... 
+ if not module.check_mode: + + if not (existing_db or existing_user): + module.exit_json(changed = False,) + + if existing_db: + # Delete the db if it exists + result.update( + webfaction.delete_db(session_id, db_name, db_type) + ) + + if existing_user: + # Delete the default db user if it exists + result.update( + webfaction.delete_db_user(session_id, db_name, db_type) + ) + + else: + module.fail_json(msg="Unknown state specified: {}".format(db_state)) + + module.exit_json( + changed = True, + result = result + ) + +from ansible.module_utils.basic import * +main() + diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py new file mode 100644 index 00000000000..0b35faf110f --- /dev/null +++ b/cloud/webfaction/webfaction_domain.py @@ -0,0 +1,171 @@ +#! /usr/bin/python +# +# Create Webfaction domains and subdomains using Ansible and the Webfaction API +# +# ------------------------------------------ +# +# (c) Quentin Stafford-Fraser 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: webfaction_domain +short_description: Add or remove domains and subdomains on Webfaction +description: + - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. 
+author: Quentin Stafford-Fraser (@quentinsf) +version_added: "2.0" +notes: + - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted. + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. + +options: + + name: + description: + - The name of the domain + required: true + + state: + description: + - Whether the domain should exist + required: false + choices: ['present', 'absent'] + default: "present" + + subdomains: + description: + - Any subdomains to create. + required: false + default: null + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +EXAMPLES = ''' + - name: Create a test domain + webfaction_domain: + name: mydomain.com + state: present + subdomains: + - www + - blog + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" + + - name: Delete test domain and any subdomains + webfaction_domain: + name: mydomain.com + state: absent + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" + +''' + +import socket +import xmlrpclib + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(required=False, choices=['present', 'absent'], default='present'), + subdomains = dict(required=False, default=[]), + login_name = dict(required=True), + login_password = dict(required=True), + ), + 
supports_check_mode=True + ) + domain_name = module.params['name'] + domain_state = module.params['state'] + domain_subdomains = module.params['subdomains'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + domain_list = webfaction.list_domains(session_id) + domain_map = dict([(i['domain'], i) for i in domain_list]) + existing_domain = domain_map.get(domain_name) + + result = {} + + # Here's where the real stuff happens + + if domain_state == 'present': + + # Does an app with this name already exist? + if existing_domain: + + if set(existing_domain['subdomains']) >= set(domain_subdomains): + # If it exists with the right subdomains, we don't change anything. + module.exit_json( + changed = False, + ) + + positional_args = [session_id, domain_name] + domain_subdomains + + if not module.check_mode: + # If this isn't a dry run, create the app + # print positional_args + result.update( + webfaction.create_domain( + *positional_args + ) + ) + + elif domain_state == 'absent': + + # If the app's already not there, nothing changed. + if not existing_domain: + module.exit_json( + changed = False, + ) + + positional_args = [session_id, domain_name] + domain_subdomains + + if not module.check_mode: + # If this isn't a dry run, delete the app + result.update( + webfaction.delete_domain(*positional_args) + ) + + else: + module.fail_json(msg="Unknown state specified: {}".format(domain_state)) + + module.exit_json( + changed = True, + result = result + ) + +from ansible.module_utils.basic import * +main() + diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py new file mode 100644 index 00000000000..7547b6154e5 --- /dev/null +++ b/cloud/webfaction/webfaction_mailbox.py @@ -0,0 +1,139 @@ +#! 
/usr/bin/python +# +# Create webfaction mailbox using Ansible and the Webfaction API +# +# ------------------------------------------ +# (c) Quentin Stafford-Fraser and Andy Baker 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: webfaction_mailbox +short_description: Add or remove mailboxes on Webfaction +description: + - Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser (@quentinsf) +version_added: "2.0" +notes: + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. 
+options: + + mailbox_name: + description: + - The name of the mailbox + required: true + + mailbox_password: + description: + - The password for the mailbox + required: true + default: null + + state: + description: + - Whether the mailbox should exist + required: false + choices: ['present', 'absent'] + default: "present" + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +EXAMPLES = ''' + - name: Create a mailbox + webfaction_mailbox: + mailbox_name="mybox" + mailbox_password="myboxpw" + state=present + login_name={{webfaction_user}} + login_password={{webfaction_passwd}} +''' + +import socket +import xmlrpclib + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec=dict( + mailbox_name=dict(required=True), + mailbox_password=dict(required=True), + state=dict(required=False, choices=['present', 'absent'], default='present'), + login_name=dict(required=True), + login_password=dict(required=True), + ), + supports_check_mode=True + ) + + mailbox_name = module.params['mailbox_name'] + site_state = module.params['state'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + mailbox_list = webfaction.list_mailboxes(session_id) + existing_mailbox = mailbox_name in mailbox_list + + result = {} + + # Here's where the real stuff happens + + if site_state == 'present': + + # Does a mailbox with this name already exist? + if existing_mailbox: + module.exit_json(changed=False,) + + positional_args = [session_id, mailbox_name] + + if not module.check_mode: + # If this isn't a dry run, create the mailbox + result.update(webfaction.create_mailbox(*positional_args)) + + elif site_state == 'absent': + + # If the mailbox is already not there, nothing changed. 
+ if not existing_mailbox: + module.exit_json(changed=False) + + if not module.check_mode: + # If this isn't a dry run, delete the mailbox + result.update(webfaction.delete_mailbox(session_id, mailbox_name)) + + else: + module.fail_json(msg="Unknown state specified: {}".format(site_state)) + + module.exit_json(changed=True, result=result) + + +from ansible.module_utils.basic import * +main() + diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py new file mode 100644 index 00000000000..57eae39c0dc --- /dev/null +++ b/cloud/webfaction/webfaction_site.py @@ -0,0 +1,208 @@ +#! /usr/bin/python +# +# Create Webfaction website using Ansible and the Webfaction API +# +# ------------------------------------------ +# +# (c) Quentin Stafford-Fraser 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: webfaction_site +short_description: Add or remove a website on a Webfaction host +description: + - Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser (@quentinsf) +version_added: "2.0" +notes: + - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name. 
+ - If a site of the same name exists in the account but on a different host, the operation will exit. + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. + +options: + + name: + description: + - The name of the website + required: true + + state: + description: + - Whether the website should exist + required: false + choices: ['present', 'absent'] + default: "present" + + host: + description: + - The webfaction host on which the site should be created. + required: true + + https: + description: + - Whether or not to use HTTPS + required: false + choices: BOOLEANS + default: 'false' + + site_apps: + description: + - A mapping of URLs to apps + required: false + + subdomains: + description: + - A list of subdomains associated with this site. + required: false + default: null + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +EXAMPLES = ''' + - name: create website + webfaction_site: + name: testsite1 + state: present + host: myhost.webfaction.com + subdomains: + - 'testsite1.my_domain.org' + site_apps: + - ['testapp1', '/'] + https: no + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" +''' + +import socket +import xmlrpclib + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(required=False, choices=['present', 'absent'], default='present'), + # You can specify an IP address or hostname. 
+ host = dict(required=True), + https = dict(required=False, choices=BOOLEANS, default=False), + subdomains = dict(required=False, default=[]), + site_apps = dict(required=False, default=[]), + login_name = dict(required=True), + login_password = dict(required=True), + ), + supports_check_mode=True + ) + site_name = module.params['name'] + site_state = module.params['state'] + site_host = module.params['host'] + site_ip = socket.gethostbyname(site_host) + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + site_list = webfaction.list_websites(session_id) + site_map = dict([(i['name'], i) for i in site_list]) + existing_site = site_map.get(site_name) + + result = {} + + # Here's where the real stuff happens + + if site_state == 'present': + + # Does a site with this name already exist? + if existing_site: + + # If yes, but it's on a different IP address, then fail. + # If we wanted to allow relocation, we could add a 'relocate=true' option + # which would get the existing IP address, delete the site there, and create it + # at the new address. A bit dangerous, perhaps, so for now we'll require manual + # deletion if it's on another host. + + if existing_site['ip'] != site_ip: + module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.") + + # If it's on this host and the key parameters are the same, nothing needs to be done. 
+ + if (existing_site['https'] == module.boolean(module.params['https'])) and \ + (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \ + (dict(existing_site['website_apps']) == dict(module.params['site_apps'])): + module.exit_json( + changed = False + ) + + positional_args = [ + session_id, site_name, site_ip, + module.boolean(module.params['https']), + module.params['subdomains'], + ] + for a in module.params['site_apps']: + positional_args.append( (a[0], a[1]) ) + + if not module.check_mode: + # If this isn't a dry run, create or modify the site + result.update( + webfaction.create_website( + *positional_args + ) if not existing_site else webfaction.update_website ( + *positional_args + ) + ) + + elif site_state == 'absent': + + # If the site's already not there, nothing changed. + if not existing_site: + module.exit_json( + changed = False, + ) + + if not module.check_mode: + # If this isn't a dry run, delete the site + result.update( + webfaction.delete_website(session_id, site_name, site_ip) + ) + + else: + module.fail_json(msg="Unknown state specified: {}".format(site_state)) + + module.exit_json( + changed = True, + result = result + ) + + + +from ansible.module_utils.basic import * +main() + diff --git a/clustering/consul.py b/clustering/consul.py new file mode 100644 index 00000000000..083173230f7 --- /dev/null +++ b/clustering/consul.py @@ -0,0 +1,507 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: consul +short_description: "Add, modify & delete services within a consul cluster. + See http://consul.io for more details." +description: + - registers services and checks for an agent with a consul cluster. A service + is some process running on the agent node that should be advertised by + consul's discovery mechanism. It may optionally supply a check definition, + a periodic service test to notify the consul cluster of service's health. + Checks may also be registered per node e.g. disk usage, or cpu usage and + notify the health of the entire node to the cluster. + Service level checks do not require a check name or id as these are derived + by Consul from the Service name and id respectively by appending 'service:'. + Node level checks require a check_name and optionally a check_id. + Currently, there is no complete way to retrieve the script, interval or ttl + metadata for a registered check. Without this metadata it is not possible to + tell if the data supplied with ansible represents a change to a check. As a + result this does not attempt to determine changes and will always report a + changed occurred. An api method is planned to supply this metadata so at that + stage change management will be added. +requirements: + - "python >= 2.6" + - python-consul + - requests +version_added: "2.0" +author: "Steve Gargan (@sgargan)" +options: + state: + description: + - register or deregister the consul service, defaults to present + required: true + choices: ['present', 'absent'] + service_name: + desciption: + - Unique name for the service on a node, must be unique per node, + required if registering a service. 
May be ommitted if registering + a node level check + required: false + service_id: + description: + - the ID for the service, must be unique per node, defaults to the + service name if the service name is supplied + required: false + default: service_name if supplied + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + description: + - the port on which the consul agent is running + required: false + default: 8500 + notes: + description: + - Notes to attach to check when registering it. + required: false + default: None + service_port: + description: + - the port on which the service is listening required for + registration of a service, i.e. if service_name or service_id is set + required: false + tags: + description: + - a list of tags that will be attached to the service registration. + required: false + default: None + script: + description: + - the script/command that will be run periodically to check the health + of the service. Scripts require an interval and vise versa + required: false + default: None + interval: + description: + - the interval at which the service check will be run. This is a number + with a s or m suffix to signify the units of seconds or minutes e.g + 15s or 1m. If no suffix is supplied, m will be used by default e.g. + 1 will be 1m. Required if the script param is specified. + required: false + default: None + check_id: + description: + - an ID for the service check, defaults to the check name, ignored if + part of a service definition. + required: false + default: None + check_name: + description: + - a name for the service check, defaults to the check id. required if + standalone, ignored if part of service definition. + required: false + default: None + ttl: + description: + - checks can be registered with a ttl instead of a script and interval + this means that the service will check in with the agent before the + ttl expires. 
If it doesn't the check will be considered failed. + Required if registering a check and the script an interval are missing + Similar to the interval this is a number with a s or m suffix to + signify the units of seconds or minutes e.g 15s or 1m. If no suffix + is supplied, m will be used by default e.g. 1 will be 1m + required: false + default: None + token: + description: + - the token key indentifying an ACL rule set. May be required to + register services. + required: false + default: None +""" + +EXAMPLES = ''' + - name: register nginx service with the local consul agent + consul: + name: nginx + service_port: 80 + + - name: register nginx service with curl check + consul: + name: nginx + service_port: 80 + script: "curl http://localhost" + interval: 60s + + - name: register nginx with some service tags + consul: + name: nginx + service_port: 80 + tags: + - prod + - webservers + + - name: remove nginx service + consul: + name: nginx + state: absent + + - name: create a node level check to test disk usage + consul: + check_name: Disk usage + check_id: disk_usage + script: "/opt/disk_usage.py" + interval: 5m + +''' + +import sys +import urllib2 + +try: + import json +except ImportError: + import simplejson as json + +try: + import consul + from requests.exceptions import ConnectionError + python_consul_installed = True +except ImportError, e: + python_consul_installed = False + +def register_with_consul(module): + + state = module.params.get('state') + + if state == 'present': + add(module) + else: + remove(module) + + +def add(module): + ''' adds a service or a check depending on supplied configuration''' + check = parse_check(module) + service = parse_service(module) + + if not service and not check: + module.fail_json(msg='a name and port are required to register a service') + + if service: + if check: + service.add_check(check) + add_service(module, service) + elif check: + add_check(module, check) + + +def remove(module): + ''' removes a service or a check 
''' + service_id = module.params.get('service_id') or module.params.get('service_name') + check_id = module.params.get('check_id') or module.params.get('check_name') + if not (service_id or check_id): + module.fail_json(msg='services and checks are removed by id or name.'\ + ' please supply a service id/name or a check id/name') + if service_id: + remove_service(module, service_id) + else: + remove_check(module, check_id) + + +def add_check(module, check): + ''' registers a check with the given agent. currently there is no way + retrieve the full metadata of an existing check through the consul api. + Without this we can't compare to the supplied check and so we must assume + a change. ''' + if not check.name: + module.fail_json(msg='a check name is required for a node level check,'\ + ' one not attached to a service') + + consul_api = get_consul_api(module) + check.register(consul_api) + + module.exit_json(changed=True, + check_id=check.check_id, + check_name=check.name, + script=check.script, + interval=check.interval, + ttl=check.ttl) + + +def remove_check(module, check_id): + ''' removes a check using its id ''' + consul_api = get_consul_api(module) + + if check_id in consul_api.agent.checks(): + consul_api.agent.check.deregister(check_id) + module.exit_json(changed=True, id=check_id) + + module.exit_json(changed=False, id=check_id) + + +def add_service(module, service): + ''' registers a service with the the current agent ''' + result = service + changed = False + + consul_api = get_consul_api(module) + existing = get_service_by_id(consul_api, service.id) + + # there is no way to retreive the details of checks so if a check is present + # in the service it must be reregistered + if service.has_checks() or not existing or not existing == service: + + service.register(consul_api) + # check that it registered correctly + registered = get_service_by_id(consul_api, service.id) + if registered: + result = registered + changed = True + + 
module.exit_json(changed=changed, + service_id=result.id, + service_name=result.name, + service_port=result.port, + checks=map(lambda x: x.to_dict(), service.checks), + tags=result.tags) + + +def remove_service(module, service_id): + ''' deregister a service from the given agent using its service id ''' + consul_api = get_consul_api(module) + service = get_service_by_id(consul_api, service_id) + if service: + consul_api.agent.service.deregister(service_id) + module.exit_json(changed=True, id=service_id) + + module.exit_json(changed=False, id=service_id) + + +def get_consul_api(module, token=None): + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port'), + token=module.params.get('token')) + + +def get_service_by_id(consul_api, service_id): + ''' iterate the registered services and find one with the given id ''' + for name, service in consul_api.agent.services().iteritems(): + if service['ID'] == service_id: + return ConsulService(loaded=service) + + +def parse_check(module): + + if module.params.get('script') and module.params.get('ttl'): + module.fail_json( + msg='check are either script or ttl driven, supplying both does'\ + ' not make sense') + + if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl'): + + return ConsulCheck( + module.params.get('check_id'), + module.params.get('check_name'), + module.params.get('check_node'), + module.params.get('check_host'), + module.params.get('script'), + module.params.get('interval'), + module.params.get('ttl'), + module.params.get('notes') + ) + + +def parse_service(module): + + if module.params.get('service_name') and module.params.get('service_port'): + return ConsulService( + module.params.get('service_id'), + module.params.get('service_name'), + module.params.get('service_port'), + module.params.get('tags'), + ) + elif module.params.get('service_name') and not module.params.get('service_port'): + + module.fail_json( + msg="service_name supplied but no 
service_port, a port is required"\ + " to configure a service. Did you configure the 'port' "\ + "argument meaning 'service_port'?") + + +class ConsulService(): + + def __init__(self, service_id=None, name=None, port=-1, + tags=None, loaded=None): + self.id = self.name = name + if service_id: + self.id = service_id + self.port = port + self.tags = tags + self.checks = [] + if loaded: + self.id = loaded['ID'] + self.name = loaded['Service'] + self.port = loaded['Port'] + self.tags = loaded['Tags'] + + def register(self, consul_api): + if len(self.checks) > 0: + check = self.checks[0] + consul_api.agent.service.register( + self.name, + service_id=self.id, + port=self.port, + tags=self.tags, + script=check.script, + interval=check.interval, + ttl=check.ttl) + else: + consul_api.agent.service.register( + self.name, + service_id=self.id, + port=self.port, + tags=self.tags) + + def add_check(self, check): + self.checks.append(check) + + def checks(self): + return self.checks + + def has_checks(self): + return len(self.checks) > 0 + + def __eq__(self, other): + return (isinstance(other, self.__class__) + and self.id == other.id + and self.name == other.name + and self.port == other.port + and self.tags == other.tags) + + def __ne__(self, other): + return not self.__eq__(other) + + def to_dict(self): + data = {'id': self.id, "name": self.name} + if self.port: + data['port'] = self.port + if self.tags and len(self.tags) > 0: + data['tags'] = self.tags + if len(self.checks) > 0: + data['check'] = self.checks[0].to_dict() + return data + + +class ConsulCheck(): + + def __init__(self, check_id, name, node=None, host='localhost', + script=None, interval=None, ttl=None, notes=None): + self.check_id = self.name = name + if check_id: + self.check_id = check_id + self.script = script + self.interval = self.validate_duration('interval', interval) + self.ttl = self.validate_duration('ttl', ttl) + self.notes = notes + self.node = node + self.host = host + + + + def 
validate_duration(self, name, duration): + if duration: + duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] + if not any((duration.endswith(suffix) for suffix in duration_units)): + raise Exception('Invalid %s %s you must specify units (%s)' % + (name, duration, ', '.join(duration_units))) + return duration + + def register(self, consul_api): + consul_api.agent.check.register(self.name, check_id=self.check_id, + script=self.script, + interval=self.interval, + ttl=self.ttl, notes=self.notes) + + def __eq__(self, other): + return (isinstance(other, self.__class__) + and self.check_id == other.check_id + and self.name == other.name + and self.script == other.script + and self.interval == other.interval) + + def __ne__(self, other): + return not self.__eq__(other) + + def to_dict(self): + data = {} + self._add(data, 'id', attr='check_id') + self._add(data, 'name') + self._add(data, 'script') + self._add(data, 'node') + self._add(data, 'notes') + self._add(data, 'host') + self._add(data, 'interval') + self._add(data, 'ttl') + return data + + def _add(self, data, key, attr=None): + try: + if attr == None: + attr = key + data[key] = getattr(self, attr) + except: + pass + +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. 
"\ + "see http://python-consul.readthedocs.org/en/latest/#installation") + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(default='localhost'), + port=dict(default=8500, type='int'), + check_id=dict(required=False), + check_name=dict(required=False), + check_node=dict(required=False), + check_host=dict(required=False), + notes=dict(required=False), + script=dict(required=False), + service_id=dict(required=False), + service_name=dict(required=False), + service_port=dict(required=False, type='int'), + state=dict(default='present', choices=['present', 'absent']), + interval=dict(required=False, type='str'), + ttl=dict(required=False, type='str'), + tags=dict(required=False, type='list'), + token=dict(required=False) + ), + supports_check_mode=False, + ) + + test_dependencies(module) + + try: + register_with_consul(module) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/clustering/consul_acl.py b/clustering/consul_acl.py new file mode 100644 index 00000000000..250de24e2a3 --- /dev/null +++ b/clustering/consul_acl.py @@ -0,0 +1,321 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: consul_acl +short_description: "manipulate consul acl keys and rules" +description: + - allows the addition, modification and deletion of ACL keys and associated + rules in a consul cluster via the agent. For more details on using and + configuring ACLs, see https://www.consul.io/docs/internals/acl.html. +requirements: + - "python >= 2.6" + - python-consul + - pyhcl + - requests +version_added: "2.0" +author: "Steve Gargan (@sgargan)" +options: + mgmt_token: + description: + - a management token is required to manipulate the acl lists + state: + description: + - whether the ACL pair should be present or absent, defaults to present + required: false + choices: ['present', 'absent'] + type: + description: + - the type of token that should be created, either management or + client, defaults to client + choices: ['client', 'management'] + name: + description: + - the name that should be associated with the acl key, this is opaque + to Consul + required: false + token: + description: + - the token key indentifying an ACL rule set. If generated by consul + this will be a UUID. + required: false + rules: + description: + - an list of the rules that should be associated with a given key/token. 
+ required: false + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + description: + - the port on which the consul agent is running + required: false + default: 8500 +""" + +EXAMPLES = ''' + - name: create an acl token with rules + consul_acl: + mgmt_token: 'some_management_acl' + host: 'consul1.mycluster.io' + name: 'Foo access' + rules: + - key: 'foo' + policy: read + - key: 'private/foo' + policy: deny + + - name: remove a token + consul_acl: + mgmt_token: 'some_management_acl' + host: 'consul1.mycluster.io' + token: '172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e' + state: absent +''' + +import sys +import urllib2 + +try: + import consul + from requests.exceptions import ConnectionError + python_consul_installed = True +except ImportError, e: + python_consul_installed = False + +try: + import hcl + pyhcl_installed = True +except ImportError: + pyhcl_installed = False + +from requests.exceptions import ConnectionError + +def execute(module): + + state = module.params.get('state') + + if state == 'present': + update_acl(module) + else: + remove_acl(module) + + +def update_acl(module): + + rules = module.params.get('rules') + state = module.params.get('state') + token = module.params.get('token') + token_type = module.params.get('token_type') + mgmt = module.params.get('mgmt_token') + name = module.params.get('name') + consul = get_consul_api(module, mgmt) + changed = False + + try: + + if token: + existing_rules = load_rules_for_token(module, consul, token) + supplied_rules = yml_to_rules(module, rules) + print existing_rules + print supplied_rules + changed = not existing_rules == supplied_rules + if changed: + y = supplied_rules.to_hcl() + token = consul.acl.update( + token, + name=name, + type=token_type, + rules=supplied_rules.to_hcl()) + else: + try: + rules = yml_to_rules(module, rules) + if rules.are_rules(): + rules = rules.to_json() + else: + rules = None + + token = consul.acl.create( + 
name=name, type=token_type, rules=rules) + changed = True + except Exception, e: + module.fail_json( + msg="No token returned, check your managment key and that \ + the host is in the acl datacenter %s" % e) + except Exception, e: + module.fail_json(msg="Could not create/update acl %s" % e) + + module.exit_json(changed=changed, + token=token, + rules=rules, + name=name, + type=token_type) + + +def remove_acl(module): + state = module.params.get('state') + token = module.params.get('token') + mgmt = module.params.get('mgmt_token') + + consul = get_consul_api(module, token=mgmt) + changed = token and consul.acl.info(token) + if changed: + token = consul.acl.destroy(token) + + module.exit_json(changed=changed, token=token) + + +def load_rules_for_token(module, consul_api, token): + try: + rules = Rules() + info = consul_api.acl.info(token) + if info and info['Rules']: + rule_set = to_ascii(info['Rules']) + for rule in hcl.loads(rule_set).values(): + for key, policy in rule.iteritems(): + rules.add_rule(Rule(key, policy['policy'])) + return rules + except Exception, e: + module.fail_json( + msg="Could not load rule list from retrieved rule data %s, %s" % ( + token, e)) + + return json_to_rules(module, loaded) + +def to_ascii(unicode_string): + if isinstance(unicode_string, unicode): + return unicode_string.encode('ascii', 'ignore') + return unicode_string + +def yml_to_rules(module, yml_rules): + rules = Rules() + if yml_rules: + for rule in yml_rules: + if not('key' in rule or 'policy' in rule): + module.fail_json(msg="a rule requires a key and a policy.") + rules.add_rule(Rule(rule['key'], rule['policy'])) + return rules + +template = '''key "%s" { + policy = "%s" +}''' + +class Rules: + + def __init__(self): + self.rules = {} + + def add_rule(self, rule): + self.rules[rule.key] = rule + + def are_rules(self): + return len(self.rules) > 0 + + def to_json(self): + rules = {} + for key, rule in self.rules.iteritems(): + rules[key] = {'policy': rule.policy} + return 
json.dumps({'keys': rules}) + + def to_hcl(self): + + rules = "" + for key, rule in self.rules.iteritems(): + rules += template % (key, rule.policy) + + return to_ascii(rules) + + def __eq__(self, other): + if not (other or isinstance(other, self.__class__) + or len(other.rules) == len(self.rules)): + return False + + for name, other_rule in other.rules.iteritems(): + if not name in self.rules: + return False + rule = self.rules[name] + + if not (rule and rule == other_rule): + return False + return True + + def __str__(self): + return self.to_hcl() + +class Rule: + + def __init__(self, key, policy): + self.key = key + self.policy = policy + + def __eq__(self, other): + return (isinstance(other, self.__class__) + and self.key == other.key + and self.policy == other.policy) + def __hash__(self): + return hash(self.key) ^ hash(self.policy) + + def __str__(self): + return '%s %s' % (self.key, self.policy) + +def get_consul_api(module, token=None): + if not token: + token = token = module.params.get('token') + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port'), + token=token) + +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. 
"\ + "see http://python-consul.readthedocs.org/en/latest/#installation") + + if not pyhcl_installed: + module.fail_json( msg="pyhcl required for this module."\ + " see https://pypi.python.org/pypi/pyhcl") + +def main(): + argument_spec = dict( + mgmt_token=dict(required=True), + host=dict(default='localhost'), + name=dict(required=False), + port=dict(default=8500, type='int'), + rules=dict(default=None, required=False, type='list'), + state=dict(default='present', choices=['present', 'absent']), + token=dict(required=False), + token_type=dict( + required=False, choices=['client', 'management'], default='client') + ) + module = AnsibleModule(argument_spec, supports_check_mode=False) + + test_dependencies(module) + + try: + execute(module) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/clustering/consul_kv.py b/clustering/consul_kv.py new file mode 100644 index 00000000000..2ba3a0315a3 --- /dev/null +++ b/clustering/consul_kv.py @@ -0,0 +1,264 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = """ +module: consul_kv +short_description: "manipulate entries in the key/value store of a consul + cluster. See http://www.consul.io/docs/agent/http.html#kv for more details." +description: + - allows the addition, modification and deletion of key/value entries in a + consul cluster via the agent. The entire contents of the record, including + the indices, flags and session are returned as 'value'. If the key + represents a prefix then Note that when a value is removed, the existing + value if any is returned as part of the results. +requirements: + - "python >= 2.6" + - python-consul + - requests +version_added: "2.0" +author: "Steve Gargan (@sgargan)" +options: + state: + description: + - the action to take with the supplied key and value. If the state is + 'present', the key contents will be set to the value supplied, + 'changed' will be set to true only if the value was different to the + current contents. The state 'absent' will remove the key/value pair, + again 'changed' will be set to true only if the key actually existed + prior to the removal. An attempt can be made to obtain or free the + lock associated with a key/value pair with the states 'acquire' or + 'release' respectively. a valid session must be supplied to make the + attempt changed will be true if the attempt is successful, false + otherwise. + required: false + choices: ['present', 'absent', 'acquire', 'release'] + default: present + key: + description: + - the key at which the value should be stored. + required: true + value: + description: + - the value should be associated with the given key, required if state + is present + required: true + recurse: + description: + - if the key represents a prefix, each entry with the prefix can be + retrieved by setting this to true. 
+ required: false + default: false + session: + description: + - the session that should be used to acquire or release a lock + associated with a key/value pair + required: false + default: None + token: + description: + - the token key indentifying an ACL rule set that controls access to + the key value pair + required: false + default: None + cas: + description: + - used when acquiring a lock with a session. If the cas is 0, then + Consul will only put the key if it does not already exist. If the + cas value is non-zero, then the key is only set if the index matches + the ModifyIndex of that key. + required: false + default: None + flags: + description: + - opaque integer value that can be passed when setting a value. + required: false + default: None + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + description: + - the port on which the consul agent is running + required: false + default: 8500 +""" + + +EXAMPLES = ''' + + - name: add or update the value associated with a key in the key/value store + consul_kv: + key: somekey + value: somevalue + + - name: remove a key from the store + consul_kv: + key: somekey + state: absent + + - name: add a node to an arbitrary group via consul inventory (see consul.ini) + consul_kv: + key: ansible/groups/dc1/somenode + value: 'top_secret' +''' + +import sys +import urllib2 + +try: + import json +except ImportError: + import simplejson as json + +try: + import consul + from requests.exceptions import ConnectionError + python_consul_installed = True +except ImportError, e: + python_consul_installed = False + +from requests.exceptions import ConnectionError + +def execute(module): + + state = module.params.get('state') + + if state == 'acquire' or state == 'release': + lock(module, state) + if state == 'present': + add_value(module) + else: + remove_value(module) + + +def lock(module, state): + + session = module.params.get('session') + key = 
module.params.get('key') + value = module.params.get('value') + + if not session: + module.fail( + msg='%s of lock for %s requested but no session supplied' % + (state, key)) + + if state == 'acquire': + successful = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + acquire=session, + flags=module.params.get('flags')) + else: + successful = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + release=session, + flags=module.params.get('flags')) + + module.exit_json(changed=successful, + index=index, + key=key) + + +def add_value(module): + + consul_api = get_consul_api(module) + + key = module.params.get('key') + value = module.params.get('value') + + index, existing = consul_api.kv.get(key) + + changed = not existing or (existing and existing['Value'] != value) + if changed and not module.check_mode: + changed = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + flags=module.params.get('flags')) + + if module.params.get('retrieve'): + index, stored = consul_api.kv.get(key) + + module.exit_json(changed=changed, + index=index, + key=key, + data=stored) + + +def remove_value(module): + ''' remove the value associated with the given key. if the recurse parameter + is set then any key prefixed with the given key will be removed. ''' + consul_api = get_consul_api(module) + + key = module.params.get('key') + value = module.params.get('value') + + index, existing = consul_api.kv.get( + key, recurse=module.params.get('recurse')) + + changed = existing != None + if changed and not module.check_mode: + consul_api.kv.delete(key, module.params.get('recurse')) + + module.exit_json(changed=changed, + index=index, + key=key, + data=existing) + + +def get_consul_api(module, token=None): + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port'), + token=module.params.get('token')) + +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. 
"\ + "see http://python-consul.readthedocs.org/en/latest/#installation") + +def main(): + + argument_spec = dict( + cas=dict(required=False), + flags=dict(required=False), + key=dict(required=True), + host=dict(default='localhost'), + port=dict(default=8500, type='int'), + recurse=dict(required=False, type='bool'), + retrieve=dict(required=False, default=True), + state=dict(default='present', choices=['present', 'absent']), + token=dict(required=False, default='anonymous'), + value=dict(required=False) + ) + + module = AnsibleModule(argument_spec, supports_check_mode=False) + + test_dependencies(module) + + try: + execute(module) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) + + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/clustering/consul_session.py b/clustering/consul_session.py new file mode 100644 index 00000000000..ef4646c35e4 --- /dev/null +++ b/clustering/consul_session.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = """ +module: consul_session +short_description: "manipulate consul sessions" +description: + - allows the addition, modification and deletion of sessions in a consul + cluster. These sessions can then be used in conjunction with key value pairs + to implement distributed locks. In depth documentation for working with + sessions can be found here http://www.consul.io/docs/internals/sessions.html +requirements: + - "python >= 2.6" + - python-consul + - requests +version_added: "2.0" +author: "Steve Gargan (@sgargan)" +options: + state: + description: + - whether the session should be present i.e. created if it doesn't + exist, or absent, removed if present. If created, the ID for the + session is returned in the output. If absent, the name or ID is + required to remove the session. Info for a single session, all the + sessions for a node or all available sessions can be retrieved by + specifying info, node or list for the state; for node or info, the + node name or session id is required as parameter. + required: false + choices: ['present', 'absent', 'info', 'node', 'list'] + default: present + name: + description: + - the name that should be associated with the session. This is opaque + to Consul and not required. + required: false + default: None + delay: + description: + - the optional lock delay that can be attached to the session when it + is created. Locks for invalidated sessions ar blocked from being + acquired until this delay has expired. Valid units for delays + include 'ns', 'us', 'ms', 's', 'm', 'h' + default: 15s + required: false + node: + description: + - the name of the node that with which the session will be associated. + by default this is the name of the agent. + required: false + default: None + datacenter: + description: + - name of the datacenter in which the session exists or should be + created. + required: false + default: None + checks: + description: + - a list of checks that will be used to verify the session health. 
If + all the checks fail, the session will be invalidated and any locks + associated with the session will be release and can be acquired once + the associated lock delay has expired. + required: false + default: None + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + description: + - the port on which the consul agent is running + required: false + default: 8500 +""" + +EXAMPLES = ''' +- name: register basic session with consul + consul_session: + name: session1 + +- name: register a session with an existing check + consul_session: + name: session_with_check + checks: + - existing_check_name + +- name: register a session with lock_delay + consul_session: + name: session_with_delay + delay: 20s + +- name: retrieve info about session by id + consul_session: id=session_id state=info + +- name: retrieve active sessions + consul_session: state=list +''' + +import sys +import urllib2 + +try: + import consul + from requests.exceptions import ConnectionError + python_consul_installed = True +except ImportError, e: + python_consul_installed = False + +def execute(module): + + state = module.params.get('state') + + if state in ['info', 'list', 'node']: + lookup_sessions(module) + elif state == 'present': + update_session(module) + else: + remove_session(module) + +def lookup_sessions(module): + + datacenter = module.params.get('datacenter') + + state = module.params.get('state') + consul = get_consul_api(module) + try: + if state == 'list': + sessions_list = consul.session.list(dc=datacenter) + #ditch the index, this can be grabbed from the results + if sessions_list and sessions_list[1]: + sessions_list = sessions_list[1] + module.exit_json(changed=True, + sessions=sessions_list) + elif state == 'node': + node = module.params.get('node') + if not node: + module.fail_json( + msg="node name is required to retrieve sessions for node") + sessions = consul.session.node(node, dc=datacenter) + 
module.exit_json(changed=True, + node=node, + sessions=sessions) + elif state == 'info': + session_id = module.params.get('id') + if not session_id: + module.fail_json( + msg="session_id is required to retrieve indvidual session info") + + session_by_id = consul.session.info(session_id, dc=datacenter) + module.exit_json(changed=True, + session_id=session_id, + sessions=session_by_id) + + except Exception, e: + module.fail_json(msg="Could not retrieve session info %s" % e) + + +def update_session(module): + + name = module.params.get('name') + session_id = module.params.get('id') + delay = module.params.get('delay') + checks = module.params.get('checks') + datacenter = module.params.get('datacenter') + node = module.params.get('node') + + consul = get_consul_api(module) + changed = True + + try: + + session = consul.session.create( + name=name, + node=node, + lock_delay=validate_duration('delay', delay), + dc=datacenter, + checks=checks + ) + module.exit_json(changed=True, + session_id=session, + name=name, + delay=delay, + checks=checks, + node=node) + except Exception, e: + module.fail_json(msg="Could not create/update session %s" % e) + + +def remove_session(module): + session_id = module.params.get('id') + + if not session_id: + module.fail_json(msg="""A session id must be supplied in order to + remove a session.""") + + consul = get_consul_api(module) + changed = False + + try: + session = consul.session.destroy(session_id) + + module.exit_json(changed=True, + session_id=session_id) + except Exception, e: + module.fail_json(msg="Could not remove session with id '%s' %s" % ( + session_id, e)) + +def validate_duration(name, duration): + if duration: + duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] + if not any((duration.endswith(suffix) for suffix in duration_units)): + raise Exception('Invalid %s %s you must specify units (%s)' % + (name, duration, ', '.join(duration_units))) + return duration + +def get_consul_api(module): + return 
consul.Consul(host=module.params.get('host'), + port=module.params.get('port')) + +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. "\ + "see http://python-consul.readthedocs.org/en/latest/#installation") + +def main(): + argument_spec = dict( + checks=dict(default=None, required=False, type='list'), + delay=dict(required=False,type='str', default='15s'), + host=dict(default='localhost'), + port=dict(default=8500, type='int'), + id=dict(required=False), + name=dict(required=False), + node=dict(required=False), + state=dict(default='present', + choices=['present', 'absent', 'info', 'node', 'list']) + ) + + module = AnsibleModule(argument_spec, supports_check_mode=False) + + test_dependencies(module) + + try: + execute(module) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/commands/__init__.py b/commands/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/commands/expect.py b/commands/expect.py new file mode 100644 index 00000000000..e8f7a049836 --- /dev/null +++ b/commands/expect.py @@ -0,0 +1,177 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Matt Martz +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import datetime + +try: + import pexpect + HAS_PEXPECT = True +except ImportError: + HAS_PEXPECT = False + + +DOCUMENTATION = ''' +--- +module: expect +version_added: 2.0 +short_description: Executes a command and responds to prompts +description: + - The M(expect) module executes a command and responds to prompts + - The given command will be executed on all selected nodes. It will not be + processed through the shell, so variables like C($HOME) and operations + like C("<"), C(">"), C("|"), and C("&") will not work +options: + command: + description: + - the command module takes command to run. + required: true + creates: + description: + - a filename, when it already exists, this step will B(not) be run. + required: false + removes: + description: + - a filename, when it does not exist, this step will B(not) be run. + required: false + chdir: + description: + - cd into this directory before running the command + required: false + responses: + description: + - Mapping of expected string and string to respond with + required: true + timeout: + description: + - Amount of time in seconds to wait for the expected strings + default: 30 + echo: + description: + - Whether or not to echo out your response strings + default: false +requirements: + - python >= 2.6 + - pexpect >= 3.3 +notes: + - If you want to run a command through the shell (say you are using C(<), + C(>), C(|), etc), you must specify a shell in the command such as + C(/bin/bash -c "/path/to/something | grep else") +author: "Matt Martz (@sivel)" +''' + +EXAMPLES = ''' +- expect: + command: passwd username + responses: + (?i)password: "MySekretPa$$word" +''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + command=dict(required=True), + chdir=dict(), + creates=dict(), + removes=dict(), + responses=dict(type='dict', required=True), + timeout=dict(type='int', default=30), + 
echo=dict(type='bool', default=False), + ) + ) + + if not HAS_PEXPECT: + module.fail_json(msg='The pexpect python module is required') + + chdir = module.params['chdir'] + args = module.params['command'] + creates = module.params['creates'] + removes = module.params['removes'] + responses = module.params['responses'] + timeout = module.params['timeout'] + echo = module.params['echo'] + + events = dict() + for key, value in responses.iteritems(): + events[key.decode()] = u'%s\n' % value.rstrip('\n').decode() + + if args.strip() == '': + module.fail_json(rc=256, msg="no command given") + + if chdir: + chdir = os.path.abspath(os.path.expanduser(chdir)) + os.chdir(chdir) + + if creates: + # do not run the command if the line contains creates=filename + # and the filename already exists. This allows idempotence + # of command executions. + v = os.path.expanduser(creates) + if os.path.exists(v): + module.exit_json( + cmd=args, + stdout="skipped, since %s exists" % v, + changed=False, + stderr=False, + rc=0 + ) + + if removes: + # do not run the command if the line contains removes=filename + # and the filename does not exist. This allows idempotence + # of command executions. 
+ v = os.path.expanduser(removes) + if not os.path.exists(v): + module.exit_json( + cmd=args, + stdout="skipped, since %s does not exist" % v, + changed=False, + stderr=False, + rc=0 + ) + + startd = datetime.datetime.now() + + try: + out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True, + events=events, cwd=chdir, echo=echo) + except pexpect.ExceptionPexpect, e: + module.fail_json(msg='%s' % e) + + endd = datetime.datetime.now() + delta = endd - startd + + if out is None: + out = '' + + module.exit_json( + cmd=args, + stdout=out.rstrip('\r\n'), + rc=rc, + start=str(startd), + end=str(endd), + delta=str(delta), + changed=True, + ) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index 3a3cf4dfff1..ede8004945b 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -87,11 +87,19 @@ options: required: false default: present choices: [ "present", "absent" ] + update_password: + required: false + default: always + choices: ['always', 'on_create'] + version_added: "2.1" + description: + - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users. + notes: - Requires the pymongo Python package on the remote host, version 2.4.2+. This can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html requirements: [ "pymongo" ] -author: Elliott Foster +author: "Elliott Foster (@elliotttf)" ''' EXAMPLES = ''' @@ -134,7 +142,15 @@ else: # MongoDB module specific support methods. 
# +def user_find(client, user): + for mongo_user in client["admin"].system.users.find(): + if mongo_user['user'] == user: + return mongo_user + return False + def user_add(module, client, db_name, user, password, roles): + #pymono's user_add is a _create_or_update_user so we won't know if it was changed or updated + #without reproducing a lot of the logic in database.py of pymongo db = client[db_name] if roles is None: db.add_user(user, password, False) @@ -147,9 +163,13 @@ def user_add(module, client, db_name, user, password, roles): err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)' module.fail_json(msg=err_msg) -def user_remove(client, db_name, user): - db = client[db_name] - db.remove_user(user) +def user_remove(module, client, db_name, user): + exists = user_find(client, user) + if exists: + db = client[db_name] + db.remove_user(user) + else: + module.exit_json(changed=False, user=user) def load_mongocnf(): config = ConfigParser.RawConfigParser() @@ -184,6 +204,7 @@ def main(): ssl=dict(default=False), roles=dict(default=None, type='list'), state=dict(default='present', choices=['absent', 'present']), + update_password=dict(default="always", choices=["always", "on_create"]), ) ) @@ -201,6 +222,7 @@ def main(): ssl = module.params['ssl'] roles = module.params['roles'] state = module.params['state'] + update_password = module.params['update_password'] try: if replica_set: @@ -208,32 +230,30 @@ def main(): else: client = MongoClient(login_host, int(login_port), ssl=ssl) - # try to authenticate as a target user to check if it already exists - try: - client[db_name].authenticate(user, password) - if state == 'present': - module.exit_json(changed=False, user=user) - except OperationFailure: - if state == 'absent': - module.exit_json(changed=False, user=user) - if login_user is None and login_password is None: mongocnf_creds = load_mongocnf() if mongocnf_creds is not False: login_user = mongocnf_creds['user'] 
login_password = mongocnf_creds['password'] - elif login_password is None and login_user is not None: + elif login_password is None or login_user is None: module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') if login_user is not None and login_password is not None: client.admin.authenticate(login_user, login_password) + elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'): + if db_name != "admin": + module.fail_json(msg='The localhost login exception only allows the first admin account to be created') + #else: this has to be the first admin user added except ConnectionFailure, e: module.fail_json(msg='unable to connect to database: %s' % str(e)) if state == 'present': - if password is None: - module.fail_json(msg='password parameter required when adding a user') + if password is None and update_password == 'always': + module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create') + + if update_password != 'always' and user_find(client, user): + password = None try: user_add(module, client, db_name, user, password, roles) @@ -242,7 +262,7 @@ def main(): elif state == 'absent': try: - user_remove(client, db_name, user) + user_remove(module, client, db_name, user) except OperationFailure, e: module.fail_json(msg='Unable to remove user: %s' % str(e)) diff --git a/database/misc/redis.py b/database/misc/redis.py index eb9654631e7..42e364a8e61 100644 --- a/database/misc/redis.py +++ b/database/misc/redis.py @@ -98,7 +98,7 @@ notes: this needs to be in the redis.conf in the masterauth variable requirements: [ redis ] -author: Xabier Larrakoetxea +author: "Xabier Larrakoetxea (@slok)" ''' EXAMPLES = ''' diff --git a/database/misc/riak.py b/database/misc/riak.py index b30e7dc485d..12586651887 100644 --- a/database/misc/riak.py +++ b/database/misc/riak.py @@ -26,6 +26,9 @@ description: - This module can be used to join nodes to a cluster, check the status of the 
cluster. version_added: "1.2" +author: + - "James Martin (@jsmartin)" + - "Drew Kerrigan (@drewkerrigan)" options: command: description: diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index 07d09602b6b..f5d2d5cf630 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -30,6 +30,7 @@ short_description: Manage MySQL replication description: - Manages MySQL server replication, slave, master status get and change master host. version_added: "1.3" +author: "Balazs Pocze (@banyek)" options: mode: description: @@ -93,7 +94,7 @@ options: master_ssl: description: - same as mysql variable - possible values: 0,1 + choices: [ 0, 1 ] master_ssl_ca: description: - same as mysql variable @@ -109,7 +110,12 @@ options: master_ssl_cipher: description: - same as mysql variable - + master_auto_position: + description: + - does the host uses GTID based replication or not + required: false + default: null + version_added: "2.0" ''' EXAMPLES = ''' @@ -239,9 +245,10 @@ def main(): login_user=dict(default=None), login_password=dict(default=None), login_host=dict(default="localhost"), - login_port=dict(default="3306"), + login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]), + master_auto_position=dict(default=False, type='bool'), master_host=dict(default=None), master_user=dict(default=None), master_password=dict(default=None), @@ -279,6 +286,7 @@ def main(): master_ssl_cert = module.params["master_ssl_cert"] master_ssl_key = module.params["master_ssl_key"] master_ssl_cipher = module.params["master_ssl_cipher"] + master_auto_position = module.params["master_auto_position"] if not mysqldb_found: module.fail_json(msg="the python mysqldb module is required") @@ -304,10 +312,10 @@ def main(): try: if module.params["login_unix_socket"]: db_connection = 
MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password) - elif module.params["login_port"] != "3306" and module.params["login_host"] == "localhost": + elif module.params["login_port"] != 3306 and module.params["login_host"] == "localhost": module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined") else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password) + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password) except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") try: @@ -376,6 +384,8 @@ def main(): if master_ssl_cipher: chm.append("MASTER_SSL_CIPHER=%(master_ssl_cipher)s") chm_params['master_ssl_cipher'] = master_ssl_cipher + if master_auto_position: + chm.append("MASTER_AUTO_POSITION = 1") changemaster(cursor, chm, chm_params) module.exit_json(changed=True) elif mode in "startslave": diff --git a/database/postgresql/postgresql_ext.py b/database/postgresql/postgresql_ext.py index d70107a4cf9..07ed48e9d03 100644 --- a/database/postgresql/postgresql_ext.py +++ b/database/postgresql/postgresql_ext.py @@ -65,7 +65,7 @@ notes: - This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module. 
requirements: [ psycopg2 ] -author: Daniel Schep +author: "Daniel Schep (@dschep)" ''' EXAMPLES = ''' diff --git a/database/postgresql/postgresql_lang.py b/database/postgresql/postgresql_lang.py index ec0507b5508..f3b1baa4d9a 100644 --- a/database/postgresql/postgresql_lang.py +++ b/database/postgresql/postgresql_lang.py @@ -95,7 +95,7 @@ notes: systems, install the postgresql, libpq-dev, and python-psycopg2 packages on the remote host before using this module. requirements: [ psycopg2 ] -author: Jens Depuydt +author: "Jens Depuydt (@jensdepuydt)" ''' EXAMPLES = ''' diff --git a/database/vertica/__init__.py b/database/vertica/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/database/vertica/vertica_configuration.py b/database/vertica/vertica_configuration.py new file mode 100644 index 00000000000..ed75667b139 --- /dev/null +++ b/database/vertica/vertica_configuration.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_configuration +version_added: '2.0' +short_description: Updates Vertica configuration parameters. +description: + - Updates Vertica configuration parameters. +options: + name: + description: + - Name of the parameter to update. + required: true + value: + description: + - Value of the parameter to be set. 
+ required: true + db: + description: + - Name of the Vertica database. + required: false + default: null + cluster: + description: + - Name of the Vertica cluster. + required: false + default: localhost + port: + description: + - Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + - The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + - The password used to authenticate with. + required: false + default: null +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +""" + +EXAMPLES = """ +- name: updating load_balance_policy + vertica_configuration: name=failovertostandbyafter value='8 hours' +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_configuration_facts(cursor, parameter_name=''): + facts = {} + cursor.execute(""" + select c.parameter_name, c.current_value, c.default_value + from configuration_parameters c + where c.node_name = 'ALL' + and (? = '' or c.parameter_name ilike ?) 
+ """, parameter_name, parameter_name) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.parameter_name.lower()] = { + 'parameter_name': row.parameter_name, + 'current_value': row.current_value, + 'default_value': row.default_value} + return facts + +def check(configuration_facts, parameter_name, current_value): + parameter_key = parameter_name.lower() + if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): + return False + return True + +def present(configuration_facts, cursor, parameter_name, current_value): + parameter_key = parameter_name.lower() + changed = False + if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): + cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value)) + changed = True + if changed: + configuration_facts.update(get_configuration_facts(cursor, parameter_name)) + return changed + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + parameter=dict(required=True, aliases=['name']), + value=dict(default=None), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + parameter_name = module.params['parameter'] + current_value = module.params['value'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + 
except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + configuration_facts = get_configuration_facts(cursor) + if module.check_mode: + changed = not check(configuration_facts, parameter_name, current_value) + else: + try: + changed = present(configuration_facts, cursor, parameter_name, current_value) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_facts.py b/database/vertica/vertica_facts.py new file mode 100644 index 00000000000..705b74a04f5 --- /dev/null +++ b/database/vertica/vertica_facts.py @@ -0,0 +1,276 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = """ +--- +module: vertica_facts +version_added: '2.0' +short_description: Gathers Vertica database facts. +description: + - Gathers Vertica database facts. +options: + cluster: + description: + - Name of the cluster running the schema. + required: false + default: localhost + port: + description: + Database port to connect to. + required: false + default: 5433 + db: + description: + - Name of the database running the schema. + required: false + default: null + login_user: + description: + - The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + - The password used to authenticate with. + required: false + default: null +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +""" + +EXAMPLES = """ +- name: gathering vertica facts + vertica_facts: db=db_name +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +# module specific functions + +def get_schema_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select schema_name, schema_owner, create_time + from schemata + where not is_system_schema and schema_name not in ('public') + and (? = '' or schema_name ilike ?) 
+ """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.schema_name.lower()] = { + 'name': row.schema_name, + 'owner': row.schema_owner, + 'create_time': str(row.create_time), + 'usage_roles': [], + 'create_roles': []} + cursor.execute(""" + select g.object_name as schema_name, r.name as role_name, + lower(g.privileges_description) privileges_description + from roles r join grants g + on g.grantee = r.name and g.object_type='SCHEMA' + and g.privileges_description like '%USAGE%' + and g.grantee not in ('public', 'dbadmin') + and (? = '' or g.object_name ilike ?) + """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + schema_key = row.schema_name.lower() + if 'create' in row.privileges_description: + facts[schema_key]['create_roles'].append(row.role_name) + else: + facts[schema_key]['usage_roles'].append(row.role_name) + return facts + +def get_user_facts(cursor, user=''): + facts = {} + cursor.execute(""" + select u.user_name, u.is_locked, u.lock_time, + p.password, p.acctexpired as is_expired, + u.profile_name, u.resource_pool, + u.all_roles, u.default_roles + from users u join password_auditor p on p.user_id = u.user_id + where not u.is_super_user + and (? = '' or u.user_name ilike ?) 
+ """, user, user) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + user_key = row.user_name.lower() + facts[user_key] = { + 'name': row.user_name, + 'locked': str(row.is_locked), + 'password': row.password, + 'expired': str(row.is_expired), + 'profile': row.profile_name, + 'resource_pool': row.resource_pool, + 'roles': [], + 'default_roles': []} + if row.is_locked: + facts[user_key]['locked_time'] = str(row.lock_time) + if row.all_roles: + facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + if row.default_roles: + facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + return facts + +def get_role_facts(cursor, role=''): + facts = {} + cursor.execute(""" + select r.name, r.assigned_roles + from roles r + where (? = '' or r.name ilike ?) + """, role, role) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + role_key = row.name.lower() + facts[role_key] = { + 'name': row.name, + 'assigned_roles': []} + if row.assigned_roles: + facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + return facts + +def get_configuration_facts(cursor, parameter=''): + facts = {} + cursor.execute(""" + select c.parameter_name, c.current_value, c.default_value + from configuration_parameters c + where c.node_name = 'ALL' + and (? = '' or c.parameter_name ilike ?) 
+ """, parameter, parameter) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.parameter_name.lower()] = { + 'parameter_name': row.parameter_name, + 'current_value': row.current_value, + 'default_value': row.default_value} + return facts + +def get_node_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select node_name, node_address, export_address, node_state, node_type, + catalog_path + from nodes + """) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.node_address] = { + 'node_name': row.node_name, + 'export_address': row.export_address, + 'node_state': row.node_state, + 'node_type': row.node_type, + 'catalog_path': row.catalog_path} + return facts + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + cluster=dict(default='localhost'), + port=dict(default='5433'), + db=dict(default=None), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + schema_facts = get_schema_facts(cursor) + user_facts = get_user_facts(cursor) + role_facts = get_role_facts(cursor) + configuration_facts = get_configuration_facts(cursor) + node_facts = get_node_facts(cursor) + module.exit_json(changed=False, + ansible_facts={'vertica_schemas': schema_facts, + 
'vertica_users': user_facts, + 'vertica_roles': role_facts, + 'vertica_configuration': configuration_facts, + 'vertica_nodes': node_facts}) + except NotSupportedError, e: + module.fail_json(msg=str(e)) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_role.py b/database/vertica/vertica_role.py new file mode 100644 index 00000000000..b7a0a5d66ef --- /dev/null +++ b/database/vertica/vertica_role.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_role +version_added: '2.0' +short_description: Adds or removes Vertica database roles and assigns roles to them. +description: + - Adds or removes Vertica database role and, optionally, assign other roles. +options: + name: + description: + - Name of the role to add or remove. + required: true + assigned_roles: + description: + - Comma separated list of roles to assign to the role. + aliases: ['assigned_role'] + required: false + default: null + state: + description: + - Whether to create C(present), drop C(absent) or lock C(locked) a role. 
+ required: false + choices: ['present', 'absent'] + default: present + db: + description: + - Name of the Vertica database. + required: false + default: null + cluster: + description: + - Name of the Vertica cluster. + required: false + default: localhost + port: + description: + - Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + - The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + - The password used to authenticate with. + required: false + default: null +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +""" + +EXAMPLES = """ +- name: creating a new vertica role + vertica_role: name=role_name db=db_name state=present + +- name: creating a new vertica role with other role assigned + vertica_role: name=role_name assigned_role=other_role_name state=present +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_role_facts(cursor, role=''): + facts = {} + cursor.execute(""" + select r.name, r.assigned_roles + from roles r + where (? = '' or r.name ilike ?) 
+ """, role, role) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + role_key = row.name.lower() + facts[role_key] = { + 'name': row.name, + 'assigned_roles': []} + if row.assigned_roles: + facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + return facts + +def update_roles(role_facts, cursor, role, + existing, required): + for assigned_role in set(existing) - set(required): + cursor.execute("revoke {0} from {1}".format(assigned_role, role)) + for assigned_role in set(required) - set(existing): + cursor.execute("grant {0} to {1}".format(assigned_role, role)) + +def check(role_facts, role, assigned_roles): + role_key = role.lower() + if role_key not in role_facts: + return False + if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0: + return False + return True + +def present(role_facts, cursor, role, assigned_roles): + role_key = role.lower() + if role_key not in role_facts: + cursor.execute("create role {0}".format(role)) + update_roles(role_facts, cursor, role, [], assigned_roles) + role_facts.update(get_role_facts(cursor, role)) + return True + else: + changed = False + if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0: + update_roles(role_facts, cursor, role, + role_facts[role_key]['assigned_roles'], assigned_roles) + changed = True + if changed: + role_facts.update(get_role_facts(cursor, role)) + return changed + +def absent(role_facts, cursor, role, assigned_roles): + role_key = role.lower() + if role_key in role_facts: + update_roles(role_facts, cursor, role, + role_facts[role_key]['assigned_roles'], []) + cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name'])) + del role_facts[role_key] + return True + else: + return False + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + role=dict(required=True, aliases=['name']), + 
assigned_roles=dict(default=None, aliases=['assigned_role']), + state=dict(default='present', choices=['absent', 'present']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + role = module.params['role'] + assigned_roles = [] + if module.params['assigned_roles']: + assigned_roles = module.params['assigned_roles'].split(',') + assigned_roles = filter(None, assigned_roles) + state = module.params['state'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + role_facts = get_role_facts(cursor) + if module.check_mode: + changed = not check(role_facts, role, assigned_roles) + elif state == 'absent': + try: + changed = absent(role_facts, cursor, role, assigned_roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + elif state == 'present': + try: + changed = present(role_facts, cursor, role, assigned_roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, role=role, 
ansible_facts={'vertica_roles': role_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_schema.py b/database/vertica/vertica_schema.py new file mode 100644 index 00000000000..39ccb0b60e8 --- /dev/null +++ b/database/vertica/vertica_schema.py @@ -0,0 +1,317 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_schema +version_added: '2.0' +short_description: Adds or removes Vertica database schema and roles. +description: + - Adds or removes Vertica database schema and, optionally, roles + with schema access privileges. + - A schema will not be removed until all the objects have been dropped. + - In such a situation, if the module tries to remove the schema it + will fail and only remove roles created for the schema if they have + no dependencies. +options: + name: + description: + - Name of the schema to add or remove. + required: true + usage_roles: + description: + - Comma separated list of roles to create and grant usage access to the schema. + aliases: ['usage_role'] + required: false + default: null + create_roles: + description: + - Comma separated list of roles to create and grant usage and create access to the schema. 
+ aliases: ['create_role'] + required: false + default: null + owner: + description: + - Name of the user to set as owner of the schema. + required: false + default: null + state: + description: + - Whether to create C(present), or drop C(absent) a schema. + required: false + default: present + choices: ['present', 'absent'] + db: + description: + - Name of the Vertica database. + required: false + default: null + cluster: + description: + - Name of the Vertica cluster. + required: false + default: localhost + port: + description: + - Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + - The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + - The password used to authenticate with. + required: false + default: null +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
+requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +""" + +EXAMPLES = """ +- name: creating a new vertica schema + vertica_schema: name=schema_name db=db_name state=present + +- name: creating a new schema with specific schema owner + vertica_schema: name=schema_name owner=dbowner db=db_name state=present + +- name: creating a new schema with roles + vertica_schema: + name=schema_name + create_roles=schema_name_all + usage_roles=schema_name_ro,schema_name_rw + db=db_name + state=present +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_schema_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select schema_name, schema_owner, create_time + from schemata + where not is_system_schema and schema_name not in ('public', 'TxtIndex') + and (? = '' or schema_name ilike ?) + """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.schema_name.lower()] = { + 'name': row.schema_name, + 'owner': row.schema_owner, + 'create_time': str(row.create_time), + 'usage_roles': [], + 'create_roles': []} + cursor.execute(""" + select g.object_name as schema_name, r.name as role_name, + lower(g.privileges_description) privileges_description + from roles r join grants g + on g.grantee_id = r.role_id and g.object_type='SCHEMA' + and g.privileges_description like '%USAGE%' + and g.grantee not in ('public', 'dbadmin') + and (? = '' or g.object_name ilike ?) 
+ """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + schema_key = row.schema_name.lower() + if 'create' in row.privileges_description: + facts[schema_key]['create_roles'].append(row.role_name) + else: + facts[schema_key]['usage_roles'].append(row.role_name) + return facts + +def update_roles(schema_facts, cursor, schema, + existing, required, + create_existing, create_required): + for role in set(existing + create_existing) - set(required + create_required): + cursor.execute("drop role {0} cascade".format(role)) + for role in set(create_existing) - set(create_required): + cursor.execute("revoke create on schema {0} from {1}".format(schema, role)) + for role in set(required + create_required) - set(existing + create_existing): + cursor.execute("create role {0}".format(role)) + cursor.execute("grant usage on schema {0} to {1}".format(schema, role)) + for role in set(create_required) - set(create_existing): + cursor.execute("grant create on schema {0} to {1}".format(schema, role)) + +def check(schema_facts, schema, usage_roles, create_roles, owner): + schema_key = schema.lower() + if schema_key not in schema_facts: + return False + if owner and owner.lower() == schema_facts[schema_key]['owner'].lower(): + return False + if cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0: + return False + if cmp(sorted(create_roles), sorted(schema_facts[schema_key]['create_roles'])) != 0: + return False + return True + +def present(schema_facts, cursor, schema, usage_roles, create_roles, owner): + schema_key = schema.lower() + if schema_key not in schema_facts: + query_fragments = ["create schema {0}".format(schema)] + if owner: + query_fragments.append("authorization {0}".format(owner)) + cursor.execute(' '.join(query_fragments)) + update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles) + schema_facts.update(get_schema_facts(cursor, schema)) + return True + else: + changed = 
False + if owner and owner.lower() != schema_facts[schema_key]['owner'].lower(): + raise NotSupportedError(( + "Changing schema owner is not supported. " + "Current owner: {0}." + ).format(schema_facts[schema_key]['owner'])) + if cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0 or \ + cmp(sorted(create_roles), sorted(schema_facts[schema_key]['create_roles'])) != 0: + update_roles(schema_facts, cursor, schema, + schema_facts[schema_key]['usage_roles'], usage_roles, + schema_facts[schema_key]['create_roles'], create_roles) + changed = True + if changed: + schema_facts.update(get_schema_facts(cursor, schema)) + return changed + +def absent(schema_facts, cursor, schema, usage_roles, create_roles): + schema_key = schema.lower() + if schema_key in schema_facts: + update_roles(schema_facts, cursor, schema, + schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], []) + try: + cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name'])) + except pyodbc.Error: + raise CannotDropError("Dropping schema failed due to dependencies.") + del schema_facts[schema_key] + return True + else: + return False + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + schema=dict(required=True, aliases=['name']), + usage_roles=dict(default=None, aliases=['usage_role']), + create_roles=dict(default=None, aliases=['create_role']), + owner=dict(default=None), + state=dict(default='present', choices=['absent', 'present']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + schema = module.params['schema'] + usage_roles = [] + if module.params['usage_roles']: + usage_roles = module.params['usage_roles'].split(',') + usage_roles = filter(None, 
usage_roles) + create_roles = [] + if module.params['create_roles']: + create_roles = module.params['create_roles'].split(',') + create_roles = filter(None, create_roles) + owner = module.params['owner'] + state = module.params['state'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + schema_facts = get_schema_facts(cursor) + if module.check_mode: + changed = not check(schema_facts, schema, usage_roles, create_roles, owner) + elif state == 'absent': + try: + changed = absent(schema_facts, cursor, schema, usage_roles, create_roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + elif state == 'present': + try: + changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_user.py b/database/vertica/vertica_user.py new file mode 100644 index 00000000000..7c52df3163a --- /dev/null +++ 
b/database/vertica/vertica_user.py @@ -0,0 +1,388 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_user +version_added: '2.0' +short_description: Adds or removes Vertica database users and assigns roles. +description: + - Adds or removes Vertica database user and, optionally, assigns roles. + - A user will not be removed until all the dependencies have been dropped. + - In such a situation, if the module tries to remove the user it + will fail and only remove roles granted to the user. +options: + name: + description: + - Name of the user to add or remove. + required: true + profile: + description: + - Sets the user's profile. + required: false + default: null + resource_pool: + description: + - Sets the user's resource pool. + required: false + default: null + password: + description: + - The user's password encrypted by the MD5 algorithm. + - The password must be generated with the format C("md5" + md5[password + username]), + resulting in a total of 35 characters. An easy way to do this is by querying + the Vertica database with select 'md5'||md5(''). + required: false + default: null + expired: + description: + - Sets the user's password expiration. + required: false + default: null + ldap: + description: + - Set to true if users are authenticated via LDAP. 
+ - The user will be created with password expired and set to I($ldap$). + required: false + default: null + roles: + description: + - Comma separated list of roles to assign to the user. + aliases: ['role'] + required: false + default: null + state: + description: + - Whether to create C(present), drop C(absent) or lock C(locked) a user. + required: false + choices: ['present', 'absent', 'locked'] + default: present + db: + description: + - Name of the Vertica database. + required: false + default: null + cluster: + description: + - Name of the Vertica cluster. + required: false + default: localhost + port: + description: + - Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + - The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + - The password used to authenticate with. + required: false + default: null +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
+requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +""" + +EXAMPLES = """ +- name: creating a new vertica user with password + vertica_user: name=user_name password=md5 db=db_name state=present + +- name: creating a new vertica user authenticated via ldap with roles assigned + vertica_user: + name=user_name + ldap=true + db=db_name + roles=schema_name_ro + state=present +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_user_facts(cursor, user=''): + facts = {} + cursor.execute(""" + select u.user_name, u.is_locked, u.lock_time, + p.password, p.acctexpired as is_expired, + u.profile_name, u.resource_pool, + u.all_roles, u.default_roles + from users u join password_auditor p on p.user_id = u.user_id + where not u.is_super_user + and (? = '' or u.user_name ilike ?) + """, user, user) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + user_key = row.user_name.lower() + facts[user_key] = { + 'name': row.user_name, + 'locked': str(row.is_locked), + 'password': row.password, + 'expired': str(row.is_expired), + 'profile': row.profile_name, + 'resource_pool': row.resource_pool, + 'roles': [], + 'default_roles': []} + if row.is_locked: + facts[user_key]['locked_time'] = str(row.lock_time) + if row.all_roles: + facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + if row.default_roles: + facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + return facts + +def update_roles(user_facts, cursor, user, + existing_all, existing_default, required): + del_roles = list(set(existing_all) - set(required)) + if del_roles: + cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user)) + new_roles = list(set(required) - set(existing_all)) + if new_roles: + cursor.execute("grant {0} to 
{1}".format(','.join(new_roles), user)) + if required: + cursor.execute("alter user {0} default role {1}".format(user, ','.join(required))) + +def check(user_facts, user, profile, resource_pool, + locked, password, expired, ldap, roles): + user_key = user.lower() + if user_key not in user_facts: + return False + if profile and profile != user_facts[user_key]['profile']: + return False + if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + return False + if locked != (user_facts[user_key]['locked'] == 'True'): + return False + if password and password != user_facts[user_key]['password']: + return False + if expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or \ + ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True'): + return False + if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \ + cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0): + return False + return True + +def present(user_facts, cursor, user, profile, resource_pool, + locked, password, expired, ldap, roles): + user_key = user.lower() + if user_key not in user_facts: + query_fragments = ["create user {0}".format(user)] + if locked: + query_fragments.append("account lock") + if password or ldap: + if password: + query_fragments.append("identified by '{0}'".format(password)) + else: + query_fragments.append("identified by '$ldap$'") + if expired or ldap: + query_fragments.append("password expire") + if profile: + query_fragments.append("profile {0}".format(profile)) + if resource_pool: + query_fragments.append("resource pool {0}".format(resource_pool)) + cursor.execute(' '.join(query_fragments)) + if resource_pool and resource_pool != 'general': + cursor.execute("grant usage on resource pool {0} to {1}".format( + resource_pool, user)) + update_roles(user_facts, cursor, user, [], [], roles) + user_facts.update(get_user_facts(cursor, user)) + return True + else: + changed = False + 
query_fragments = ["alter user {0}".format(user)] + if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'): + if locked: + state = 'lock' + else: + state = 'unlock' + query_fragments.append("account {0}".format(state)) + changed = True + if password and password != user_facts[user_key]['password']: + query_fragments.append("identified by '{0}'".format(password)) + changed = True + if ldap: + if ldap != (user_facts[user_key]['expired'] == 'True'): + query_fragments.append("password expire") + changed = True + elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'): + if expired: + query_fragments.append("password expire") + changed = True + else: + raise NotSupportedError("Unexpiring user password is not supported.") + if profile and profile != user_facts[user_key]['profile']: + query_fragments.append("profile {0}".format(profile)) + changed = True + if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + query_fragments.append("resource pool {0}".format(resource_pool)) + if user_facts[user_key]['resource_pool'] != 'general': + cursor.execute("revoke usage on resource pool {0} from {1}".format( + user_facts[user_key]['resource_pool'], user)) + if resource_pool != 'general': + cursor.execute("grant usage on resource pool {0} to {1}".format( + resource_pool, user)) + changed = True + if changed: + cursor.execute(' '.join(query_fragments)) + if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \ + cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0): + update_roles(user_facts, cursor, user, + user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles) + changed = True + if changed: + user_facts.update(get_user_facts(cursor, user)) + return changed + +def absent(user_facts, cursor, user, roles): + user_key = user.lower() + if user_key in user_facts: + update_roles(user_facts, cursor, user, + user_facts[user_key]['roles'], 
user_facts[user_key]['default_roles'], []) + try: + cursor.execute("drop user {0}".format(user_facts[user_key]['name'])) + except pyodbc.Error: + raise CannotDropError("Dropping user failed due to dependencies.") + del user_facts[user_key] + return True + else: + return False + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + user=dict(required=True, aliases=['name']), + profile=dict(default=None), + resource_pool=dict(default=None), + password=dict(default=None), + expired=dict(type='bool', default=None), + ldap=dict(type='bool', default=None), + roles=dict(default=None, aliases=['role']), + state=dict(default='present', choices=['absent', 'present', 'locked']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + user = module.params['user'] + profile = module.params['profile'] + if profile: + profile = profile.lower() + resource_pool = module.params['resource_pool'] + if resource_pool: + resource_pool = resource_pool.lower() + password = module.params['password'] + expired = module.params['expired'] + ldap = module.params['ldap'] + roles = [] + if module.params['roles']: + roles = module.params['roles'].split(',') + roles = filter(None, roles) + state = module.params['state'] + if state == 'locked': + locked = True + else: + locked = False + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except 
Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + user_facts = get_user_facts(cursor) + if module.check_mode: + changed = not check(user_facts, user, profile, resource_pool, + locked, password, expired, ldap, roles) + elif state == 'absent': + try: + changed = absent(user_facts, cursor, user, roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + elif state in ['present', 'locked']: + try: + changed = present(user_facts, cursor, user, profile, resource_pool, + locked, password, expired, ldap, roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/files/patch.py b/files/patch.py index cd4b3130079..60629c922e9 100644 --- a/files/patch.py +++ b/files/patch.py @@ -22,7 +22,9 @@ DOCUMENTATION = ''' --- module: patch -author: Luis Alberto Perez Lazaro, Jakub Jirutka +author: + - "Jakub Jirutka (@jirutka)" + - "Luis Alberto Perez Lazaro (@luisperlaz)" version_added: 1.9 description: - Apply patch files using the GNU patch tool. @@ -43,7 +45,9 @@ options: aliases: [ "originalfile" ] src: description: - - Path of the patch file as accepted by the GNU patch tool. + - Path of the patch file as accepted by the GNU patch tool. If + C(remote_src) is False, the patch source file is looked up from the + module's "files" directory. 
required: true aliases: [ "patchfile" ] remote_src: @@ -61,6 +65,14 @@ options: required: false type: "int" default: "0" + backup: + version_added: "2.0" + description: + - passes --backup --version-control=numbered to patch, + producing numbered backup copies + required: false + type: "bool" + default: "False" note: - This module requires GNU I(patch) utility to be installed on the remote host. ''' @@ -97,7 +109,7 @@ def is_already_applied(patch_func, patch_file, basedir, dest_file=None, strip=0) return rc == 0 -def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False): +def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False, backup=False): opts = ['--quiet', '--forward', '--batch', '--reject-file=-', "--strip=%s" % strip, "--directory='%s'" % basedir, "--input='%s'" % patch_file] @@ -105,10 +117,12 @@ def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_ru opts.append('--dry-run') if dest_file: opts.append("'%s'" % dest_file) + if backup: + opts.append('--backup --version-control=numbered') (rc, out, err) = patch_func(opts) if rc != 0: - msg = out if not err else err + msg = err or out raise PatchError(msg) @@ -120,6 +134,9 @@ def main(): 'basedir': {}, 'strip': {'default': 0, 'type': 'int'}, 'remote_src': {'default': False, 'type': 'bool'}, + # NB: for 'backup' parameter, semantics is slightly different from standard + # since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~") + 'backup': { 'default': False, 'type': 'bool' } }, required_one_of=[['dest', 'basedir']], supports_check_mode=True @@ -128,6 +145,7 @@ def main(): # Create type object as namespace for module params p = type('Params', (), module.params) + p.src = os.path.expanduser(p.src) if not os.access(p.src, R_OK): module.fail_json(msg="src %s doesn't exist or not readable" % (p.src)) @@ -141,13 +159,18 @@ def main(): p.basedir = path.dirname(p.dest) patch_bin = module.get_bin_path('patch') + if 
patch_bin is None: + module.fail_json(msg="patch command not found") patch_func = lambda opts: module.run_command("%s %s" % (patch_bin, ' '.join(opts))) + # patch need an absolute file name + p.src = os.path.abspath(p.src) + changed = False if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip): try: - apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip, - dry_run=module.check_mode) + apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip, + dry_run=module.check_mode, backup=p.backup ) changed = True except PatchError, e: module.fail_json(msg=str(e)) diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py new file mode 100644 index 00000000000..fc69f490fad --- /dev/null +++ b/messaging/rabbitmq_binding.py @@ -0,0 +1,214 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Manuel Sousa +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +DOCUMENTATION = ''' +--- +module: rabbitmq_binding +author: "Manuel Sousa (@manuel-sousa)" +version_added: "2.0" + +short_description: This module manages rabbitMQ bindings +description: + - This module uses rabbitMQ Rest API to create/delete bindings +requirements: [ python requests ] +options: + state: + description: + - Whether the exchange should be present or absent + - Only present implemented atm + choices: [ "present", "absent" ] + required: false + default: present + name: + description: + - source exchange to create binding on + required: true + aliases: [ "src", "source" ] + login_user: + description: + - rabbitMQ user for connection + required: false + default: guest + login_password: + description: + - rabbitMQ password for connection + required: false + default: false + login_host: + description: + - rabbitMQ host for connection + required: false + default: localhost + login_port: + description: + - rabbitMQ management api port + required: false + default: 15672 + vhost: + description: + - rabbitMQ virtual host + - default vhost is / + required: false + default: "/" + destination: + description: + - destination exchange or queue for the binding + required: true + aliases: [ "dst", "dest" ] + destination_type: + description: + - Either queue or exchange + required: true + choices: [ "queue", "exchange" ] + aliases: [ "type", "dest_type" ] + routing_key: + description: + - routing key for the binding + - default is # + required: false + default: "#" + arguments: + description: + - extra arguments for exchange. 
If defined this argument is a key/value dictionary + required: false + default: {} +''' + +EXAMPLES = ''' +# Bind myQueue to directExchange with routing key info +- rabbitmq_binding: name=directExchange destination=myQueue type=queue routing_key=info + +# Bind directExchange to topicExchange with routing key *.info +- rabbitmq_binding: name=topicExchange destination=topicExchange type=exchange routing_key="*.info" +''' + +import requests +import urllib +import json + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default='present', choices=['present', 'absent'], type='str'), + name = dict(required=True, aliases=[ "src", "source" ], type='str'), + login_user = dict(default='guest', type='str'), + login_password = dict(default='guest', type='str', no_log=True), + login_host = dict(default='localhost', type='str'), + login_port = dict(default='15672', type='str'), + vhost = dict(default='/', type='str'), + destination = dict(required=True, aliases=[ "dst", "dest"], type='str'), + destination_type = dict(required=True, aliases=[ "type", "dest_type"], choices=[ "queue", "exchange" ],type='str'), + routing_key = dict(default='#', type='str'), + arguments = dict(default=dict(), type='dict') + ), + supports_check_mode = True + ) + + if module.params['destination_type'] == "queue": + dest_type="q" + else: + dest_type="e" + + url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % ( + module.params['login_host'], + module.params['login_port'], + urllib.quote(module.params['vhost'],''), + module.params['name'], + dest_type, + module.params['destination'], + urllib.quote(module.params['routing_key'],'') + ) + + # Check if exchange already exists + r = requests.get( url, auth=(module.params['login_user'],module.params['login_password'])) + + if r.status_code==200: + binding_exists = True + response = r.json() + elif r.status_code==404: + binding_exists = False + response = r.text + else: + module.fail_json( + msg = "Invalid response from RESTAPI when 
trying to check if exchange exists", + details = r.text + ) + + if module.params['state']=='present': + change_required = not binding_exists + else: + change_required = binding_exists + + # Exit if check_mode + if module.check_mode: + module.exit_json( + changed= change_required, + name = module.params['name'], + details = response, + arguments = module.params['arguments'] + ) + + # Do changes + if change_required: + if module.params['state'] == 'present': + url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s" % ( + module.params['login_host'], + module.params['login_port'], + urllib.quote(module.params['vhost'],''), + module.params['name'], + dest_type, + module.params['destination'] + ) + + r = requests.post( + url, + auth = (module.params['login_user'],module.params['login_password']), + headers = { "content-type": "application/json"}, + data = json.dumps({ + "routing_key": module.params['routing_key'], + "arguments": module.params['arguments'] + }) + ) + elif module.params['state'] == 'absent': + r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password'])) + + if r.status_code == 204 or r.status_code == 201: + module.exit_json( + changed = True, + name = module.params['name'], + destination = module.params['destination'] + ) + else: + module.fail_json( + msg = "Error creating exchange", + status = r.status_code, + details = r.text + ) + + else: + module.exit_json( + changed = False, + name = module.params['name'] + ) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py new file mode 100644 index 00000000000..fb74298879b --- /dev/null +++ b/messaging/rabbitmq_exchange.py @@ -0,0 +1,218 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Manuel Sousa +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by 
+# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: rabbitmq_exchange +author: "Manuel Sousa (@manuel-sousa)" +version_added: "2.0" + +short_description: This module manages rabbitMQ exchanges +description: + - This module uses rabbitMQ Rest API to create/delete exchanges +requirements: [ python requests ] +options: + name: + description: + - Name of the exchange to create + required: true + state: + description: + - Whether the exchange should be present or absent + - Only present implemented atm + choices: [ "present", "absent" ] + required: false + default: present + login_user: + description: + - rabbitMQ user for connection + required: false + default: guest + login_password: + description: + - rabbitMQ password for connection + required: false + default: false + login_host: + description: + - rabbitMQ host for connection + required: false + default: localhost + login_port: + description: + - rabbitMQ management api port + required: false + default: 15672 + vhost: + description: + - rabbitMQ virtual host + required: false + default: "/" + durable: + description: + - whether exchange is durable or not + required: false + choices: [ "yes", "no" ] + default: yes + exchange_type: + description: + - type for the exchange + required: false + choices: [ "fanout", "direct", "headers", "topic" ] + aliases: [ "type" ] + default: direct + auto_delete: + description: + - if the exchange should delete itself after all queues/exchanges unbound from it + required: false + choices: [ "yes", "no" ] + default: no + internal: + 
description: + - exchange is available only for other exchanges + required: false + choices: [ "yes", "no" ] + default: no + arguments: + description: + - extra arguments for exchange. If defined this argument is a key/value dictionary + required: false + default: {} +''' + +EXAMPLES = ''' +# Create direct exchange +- rabbitmq_exchange: name=directExchange + +# Create topic exchange on vhost +- rabbitmq_exchange: name=topicExchange type=topic vhost=myVhost +''' + +import requests +import urllib +import json + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default='present', choices=['present', 'absent'], type='str'), + name = dict(required=True, type='str'), + login_user = dict(default='guest', type='str'), + login_password = dict(default='guest', type='str', no_log=True), + login_host = dict(default='localhost', type='str'), + login_port = dict(default='15672', type='str'), + vhost = dict(default='/', type='str'), + durable = dict(default=True, choices=BOOLEANS, type='bool'), + auto_delete = dict(default=False, choices=BOOLEANS, type='bool'), + internal = dict(default=False, choices=BOOLEANS, type='bool'), + exchange_type = dict(default='direct', aliases=['type'], type='str'), + arguments = dict(default=dict(), type='dict') + ), + supports_check_mode = True + ) + + url = "http://%s:%s/api/exchanges/%s/%s" % ( + module.params['login_host'], + module.params['login_port'], + urllib.quote(module.params['vhost'],''), + module.params['name'] + ) + + # Check if exchange already exists + r = requests.get( url, auth=(module.params['login_user'],module.params['login_password'])) + + if r.status_code==200: + exchange_exists = True + response = r.json() + elif r.status_code==404: + exchange_exists = False + response = r.text + else: + module.fail_json( + msg = "Invalid response from RESTAPI when trying to check if exchange exists", + details = r.text + ) + + if module.params['state']=='present': + change_required = not exchange_exists + else: + 
change_required = exchange_exists + + # Check if attributes change on existing exchange + if not change_required and r.status_code==200 and module.params['state'] == 'present': + if not ( + response['durable'] == module.params['durable'] and + response['auto_delete'] == module.params['auto_delete'] and + response['internal'] == module.params['internal'] and + response['type'] == module.params['exchange_type'] + ): + module.fail_json( + msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing exchanges" + ) + + # Exit if check_mode + if module.check_mode: + module.exit_json( + changed= change_required, + name = module.params['name'], + details = response, + arguments = module.params['arguments'] + ) + + # Do changes + if change_required: + if module.params['state'] == 'present': + r = requests.put( + url, + auth = (module.params['login_user'],module.params['login_password']), + headers = { "content-type": "application/json"}, + data = json.dumps({ + "durable": module.params['durable'], + "auto_delete": module.params['auto_delete'], + "internal": module.params['internal'], + "type": module.params['exchange_type'], + "arguments": module.params['arguments'] + }) + ) + elif module.params['state'] == 'absent': + r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password'])) + + if r.status_code == 204: + module.exit_json( + changed = True, + name = module.params['name'] + ) + else: + module.fail_json( + msg = "Error creating exchange", + status = r.status_code, + details = r.text + ) + + else: + module.exit_json( + changed = False, + name = module.params['name'] + ) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/messaging/rabbitmq_parameter.py b/messaging/rabbitmq_parameter.py index 2f78bd4ee15..6be18bdce3d 100644 --- a/messaging/rabbitmq_parameter.py +++ b/messaging/rabbitmq_parameter.py @@ -25,7 +25,7 @@ short_description: Adds or removes parameters to RabbitMQ description: - 
Manage dynamic, cluster-wide parameters for RabbitMQ version_added: "1.1" -author: Chris Hoffman +author: '"Chris Hoffman (@chrishoffman)"' options: component: description: diff --git a/messaging/rabbitmq_plugin.py b/messaging/rabbitmq_plugin.py index 53c38f978d5..db23df3fcc8 100644 --- a/messaging/rabbitmq_plugin.py +++ b/messaging/rabbitmq_plugin.py @@ -25,7 +25,7 @@ short_description: Adds or removes plugins to RabbitMQ description: - Enables or disables RabbitMQ plugins version_added: "1.1" -author: Chris Hoffman +author: '"Chris Hoffman (@chrishoffman)"' options: names: description: diff --git a/messaging/rabbitmq_policy.py b/messaging/rabbitmq_policy.py index 800c3822d55..81d7068ec46 100644 --- a/messaging/rabbitmq_policy.py +++ b/messaging/rabbitmq_policy.py @@ -26,7 +26,7 @@ short_description: Manage the state of policies in RabbitMQ. description: - Manage the state of a virtual host in RabbitMQ. version_added: "1.5" -author: John Dewey +author: "John Dewey (@retr0h)" options: name: description: diff --git a/messaging/rabbitmq_queue.py b/messaging/rabbitmq_queue.py new file mode 100644 index 00000000000..5a403a6b602 --- /dev/null +++ b/messaging/rabbitmq_queue.py @@ -0,0 +1,263 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Manuel Sousa +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +DOCUMENTATION = ''' +--- +module: rabbitmq_queue +author: "Manuel Sousa (@manuel-sousa)" +version_added: "2.0" + +short_description: This module manages rabbitMQ queues +description: + - This module uses rabbitMQ Rest API to create/delete queues +requirements: [ python requests ] +options: + name: + description: + - Name of the queue to create + required: true + state: + description: + - Whether the queue should be present or absent + - Only present implemented atm + choices: [ "present", "absent" ] + required: false + default: present + login_user: + description: + - rabbitMQ user for connection + required: false + default: guest + login_password: + description: + - rabbitMQ password for connection + required: false + default: guest + login_host: + description: + - rabbitMQ host for connection + required: false + default: localhost + login_port: + description: + - rabbitMQ management api port + required: false + default: 15672 + vhost: + description: + - rabbitMQ virtual host + required: false + default: "/" + durable: + description: + - whether queue is durable or not + required: false + choices: [ "yes", "no" ] + default: yes + auto_delete: + description: + - if the queue should delete itself after all queues/queues unbound from it + required: false + choices: [ "yes", "no" ] + default: no + message_ttl: + description: + - How long a message can live in queue before it is discarded (milliseconds) + required: False + default: forever + auto_expires: + description: + - How long a queue can be unused before it is automatically deleted (milliseconds) + required: false + default: forever + max_length: + description: + - How many messages can the queue contain before it starts rejecting + required: false + default: no limit + dead_letter_exchange: + description: + - Optional name of an exchange to which messages will be republished if they + - are rejected or expire + required: false + default: None + dead_letter_routing_key: + description: + - Optional 
replacement routing key to use when a message is dead-lettered. + - Original routing key will be used if unset + required: false + default: None + arguments: + description: + - extra arguments for queue. If defined this argument is a key/value dictionary + required: false + default: {} +''' + +EXAMPLES = ''' +# Create a queue +- rabbitmq_queue: name=myQueue + +# Create a queue on remote host +- rabbitmq_queue: name=myRemoteQueue login_user=user login_password=secret login_host=remote.example.org +''' + +import requests +import urllib +import json + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default='present', choices=['present', 'absent'], type='str'), + name = dict(required=True, type='str'), + login_user = dict(default='guest', type='str'), + login_password = dict(default='guest', type='str', no_log=True), + login_host = dict(default='localhost', type='str'), + login_port = dict(default='15672', type='str'), + vhost = dict(default='/', type='str'), + durable = dict(default=True, choices=BOOLEANS, type='bool'), + auto_delete = dict(default=False, choices=BOOLEANS, type='bool'), + message_ttl = dict(default=None, type='int'), + auto_expires = dict(default=None, type='int'), + max_length = dict(default=None, type='int'), + dead_letter_exchange = dict(default=None, type='str'), + dead_letter_routing_key = dict(default=None, type='str'), + arguments = dict(default=dict(), type='dict') + ), + supports_check_mode = True + ) + + url = "http://%s:%s/api/queues/%s/%s" % ( + module.params['login_host'], + module.params['login_port'], + urllib.quote(module.params['vhost'],''), + module.params['name'] + ) + + # Check if queue already exists + r = requests.get( url, auth=(module.params['login_user'],module.params['login_password'])) + + if r.status_code==200: + queue_exists = True + response = r.json() + elif r.status_code==404: + queue_exists = False + response = r.text + else: + module.fail_json( + msg = "Invalid response from RESTAPI when 
trying to check if queue exists", + details = r.text + ) + + if module.params['state']=='present': + change_required = not queue_exists + else: + change_required = queue_exists + + # Check if attributes change on existing queue + if not change_required and r.status_code==200 and module.params['state'] == 'present': + if not ( + response['durable'] == module.params['durable'] and + response['auto_delete'] == module.params['auto_delete'] and + ( + ( 'x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl'] ) or + ( 'x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None ) + ) and + ( + ( 'x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires'] ) or + ( 'x-expires' not in response['arguments'] and module.params['auto_expires'] is None ) + ) and + ( + ( 'x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length'] ) or + ( 'x-max-length' not in response['arguments'] and module.params['max_length'] is None ) + ) and + ( + ( 'x-dead-letter-exchange' in response['arguments'] and response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange'] ) or + ( 'x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None ) + ) and + ( + ( 'x-dead-letter-routing-key' in response['arguments'] and response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key'] ) or + ( 'x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None ) + ) + ): + module.fail_json( + msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing queues", + ) + + + # Copy parameters to arguments as used by RabbitMQ + for k,v in { + 'message_ttl': 'x-message-ttl', + 'auto_expires': 'x-expires', + 'max_length': 'x-max-length', + 'dead_letter_exchange': 
'x-dead-letter-exchange', + 'dead_letter_routing_key': 'x-dead-letter-routing-key' + }.items(): + if module.params[k]: + module.params['arguments'][v] = module.params[k] + + # Exit if check_mode + if module.check_mode: + module.exit_json( + changed= change_required, + name = module.params['name'], + details = response, + arguments = module.params['arguments'] + ) + + # Do changes + if change_required: + if module.params['state'] == 'present': + r = requests.put( + url, + auth = (module.params['login_user'],module.params['login_password']), + headers = { "content-type": "application/json"}, + data = json.dumps({ + "durable": module.params['durable'], + "auto_delete": module.params['auto_delete'], + "arguments": module.params['arguments'] + }) + ) + elif module.params['state'] == 'absent': + r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password'])) + + if r.status_code == 204: + module.exit_json( + changed = True, + name = module.params['name'] + ) + else: + module.fail_json( + msg = "Error creating queue", + status = r.status_code, + details = r.text + ) + + else: + module.exit_json( + changed = False, + name = module.params['name'] + ) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/messaging/rabbitmq_user.py b/messaging/rabbitmq_user.py index f494ce802d9..6333e42282e 100644 --- a/messaging/rabbitmq_user.py +++ b/messaging/rabbitmq_user.py @@ -25,7 +25,7 @@ short_description: Adds or removes users to RabbitMQ description: - Add or remove users to RabbitMQ and assign permissions version_added: "1.1" -author: Chris Hoffman +author: '"Chris Hoffman (@chrishoffman)"' options: user: description: diff --git a/messaging/rabbitmq_vhost.py b/messaging/rabbitmq_vhost.py index fd4b04a683f..dbde32393cb 100644 --- a/messaging/rabbitmq_vhost.py +++ b/messaging/rabbitmq_vhost.py @@ -26,7 +26,7 @@ short_description: Manage the state of a virtual host in RabbitMQ description: - Manage the state of a 
virtual host in RabbitMQ version_added: "1.1" -author: Chris Hoffman +author: '"Chris Hoffman (@chrishoffman)"' options: name: description: diff --git a/monitoring/airbrake_deployment.py b/monitoring/airbrake_deployment.py index e1c490b881b..a58df024182 100644 --- a/monitoring/airbrake_deployment.py +++ b/monitoring/airbrake_deployment.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: airbrake_deployment version_added: "1.2" -author: Bruce Pennypacker +author: "Bruce Pennypacker (@bpennypacker)" short_description: Notify airbrake about app deployments description: - Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking) @@ -61,8 +61,7 @@ options: default: 'yes' choices: ['yes', 'no'] -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] +requirements: [] ''' EXAMPLES = ''' @@ -72,6 +71,8 @@ EXAMPLES = ''' revision=4.2 ''' +import urllib + # =========================================== # Module execution. # diff --git a/monitoring/bigpanda.py b/monitoring/bigpanda.py index 11950287078..3bed44893b7 100644 --- a/monitoring/bigpanda.py +++ b/monitoring/bigpanda.py @@ -3,7 +3,7 @@ DOCUMENTATION = ''' --- module: bigpanda -author: BigPanda +author: "Hagai Kariti (@hkariti)" short_description: Notify BigPanda about deployments version_added: "1.8" description: @@ -162,7 +162,7 @@ def main(): module.exit_json(changed=True, **deployment) else: module.fail_json(msg=json.dumps(info)) - except Exception as e: + except Exception, e: module.fail_json(msg=str(e)) # import module snippets diff --git a/monitoring/boundary_meter.py b/monitoring/boundary_meter.py index da739d4306f..431a6ace1b9 100644 --- a/monitoring/boundary_meter.py +++ b/monitoring/boundary_meter.py @@ -34,7 +34,7 @@ short_description: Manage boundary meters description: - This module manages boundary meters version_added: "1.3" -author: curtis@serverascode.com +author: "curtis (@ccollicutt)" requirements: - Boundary API access - bprobe is required to send 
data, but not to register a meter @@ -213,7 +213,7 @@ def download_request(module, name, apiid, apikey, cert_type): cert_file = open(cert_file_path, 'w') cert_file.write(body) cert_file.close - os.chmod(cert_file_path, 0o600) + os.chmod(cert_file_path, 0600) except: module.fail_json("Could not write to certificate file") diff --git a/monitoring/circonus_annotation.py b/monitoring/circonus_annotation.py new file mode 100644 index 00000000000..1585cd8080a --- /dev/null +++ b/monitoring/circonus_annotation.py @@ -0,0 +1,132 @@ +#!/usr/bin/python + +# (c) 2014-2015, Epic Games, Inc. + +import requests +import time +import json + +DOCUMENTATION = ''' +--- +module: circonus_annotation +short_description: create an annotation in circonus +description: + - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided +author: "Nick Harring (@NickatEpic)" +version_added: 2.0 +requirements: + - urllib3 + - requests + - time +options: + api_key: + description: + - Circonus API key + required: true + category: + description: + - Annotation Category + required: true + description: + description: + - Description of annotation + required: true + title: + description: + - Title of annotation + required: true + start: + description: + - Unix timestamp of event start, defaults to now + required: false + stop: + description: + - Unix timestamp of event end, defaults to now + duration + required: false + duration: + description: + - Duration in seconds of annotation, defaults to 0 + required: false +''' +EXAMPLES = ''' +# Create a simple annotation event with a source, defaults to start and end time of now +- circonus_annotation: + api_key: XXXXXXXXXXXXXXXXX + title: 'App Config Change' + description: 'This is a detailed description of the config change' + category: 'This category groups like annotations' +# Create an annotation with a duration of 5 minutes and a default start time of now +- circonus_annotation: + api_key: 
XXXXXXXXXXXXXXXXX + title: 'App Config Change' + description: 'This is a detailed description of the config change' + category: 'This category groups like annotations' + duration: 300 +# Create an annotation with a start_time and end_time +- circonus_annotation: + api_key: XXXXXXXXXXXXXXXXX + title: 'App Config Change' + description: 'This is a detailed description of the config change' + category: 'This category groups like annotations' + start_time: 1395940006 + end_time: 1395954407 +''' +def post_annotation(annotation, api_key): + ''' Takes annotation dict and api_key string''' + base_url = 'https://api.circonus.com/v2' + annotate_post_endpoint = '/annotation' + resp = requests.post(base_url + annotate_post_endpoint, + headers=build_headers(api_key), data=json.dumps(annotation)) + resp.raise_for_status() + return resp + +def create_annotation(module): + ''' Takes ansible module object ''' + annotation = {} + if module.params['duration'] != None: + duration = module.params['duration'] + else: + duration = 0 + if module.params['start'] != None: + start = module.params['start'] + else: + start = int(time.time()) + if module.params['stop'] != None: + stop = module.params['stop'] + else: + stop = int(time.time())+ duration + annotation['start'] = int(start) + annotation['stop'] = int(stop) + annotation['category'] = module.params['category'] + annotation['description'] = module.params['description'] + annotation['title'] = module.params['title'] + return annotation +def build_headers(api_token): + '''Takes api token, returns headers with it included.''' + headers = {'X-Circonus-App-Name': 'ansible', + 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token, + 'Accept': 'application/json'} + return headers + +def main(): + '''Main function, dispatches logic''' + module = AnsibleModule( + argument_spec=dict( + start=dict(required=False, type='int'), + stop=dict(required=False, type='int'), + category=dict(required=True), + title=dict(required=True), + 
description=dict(required=True), + duration=dict(required=False, type='int'), + api_key=dict(required=True) + ) + ) + annotation = create_annotation(module) + try: + resp = post_annotation(annotation, module.params['api_key']) + except requests.exceptions.RequestException, err_str: + module.fail_json(msg='Request Failed', reason=err_str) + module.exit_json(changed=True, annotation=resp.json()) + +from ansible.module_utils.basic import * +main() diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index 5d38dd4c31d..ebbad039dec 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -14,7 +14,7 @@ description: - "Allows to post events to DataDog (www.datadoghq.com) service." - "Uses http://docs.datadoghq.com/api/#events API." version_added: "1.3" -author: Artūras 'arturaz' Šlajus +author: "Artūras `arturaz` Šlajus (@arturaz)" notes: [] requirements: [urllib2] options: @@ -71,7 +71,7 @@ datadog_event: title="Testing from ansible" text="Test!" priority="low" # Post an event with several tags datadog_event: title="Testing from ansible" text="Test!" 
api_key="6873258723457823548234234234" - tags=aa,bb,cc + tags=aa,bb,#host:{{ inventory_hostname }} ''' import socket @@ -86,7 +86,7 @@ def main(): priority=dict( required=False, default='normal', choices=['normal', 'low'] ), - tags=dict(required=False, default=None), + tags=dict(required=False, default=None, type='list'), alert_type=dict( required=False, default='info', choices=['error', 'warning', 'info', 'success'] @@ -116,7 +116,7 @@ def post_event(module): if module.params['date_happened'] != None: body['date_happened'] = module.params['date_happened'] if module.params['tags'] != None: - body['tags'] = module.params['tags'].split(",") + body['tags'] = module.params['tags'] if module.params['aggregation_key'] != None: body['aggregation_key'] = module.params['aggregation_key'] if module.params['source_type_name'] != None: diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py new file mode 100644 index 00000000000..9853d748c2c --- /dev/null +++ b/monitoring/datadog_monitor.py @@ -0,0 +1,283 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Sebastian Kornehl +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# import module snippets + +# Import Datadog +try: + from datadog import initialize, api + HAS_DATADOG = True +except: + HAS_DATADOG = False + +DOCUMENTATION = ''' +--- +module: datadog_monitor +short_description: Manages Datadog monitors +description: +- "Manages monitors within Datadog" +- "Options like described on http://docs.datadoghq.com/api/" +version_added: "2.0" +author: "Sebastian Kornehl (@skornehl)" +notes: [] +requirements: [datadog] +options: + api_key: + description: ["Your DataDog API key."] + required: true + app_key: + description: ["Your DataDog app key."] + required: true + state: + description: ["The designated state of the monitor."] + required: true + choices: ['present', 'absent', 'muted', 'unmuted'] + type: + description: ["The type of the monitor."] + required: false + default: null + choices: ['metric alert', 'service check'] + query: + description: ["he monitor query to notify on with syntax varying depending on what type of monitor you are creating."] + required: false + default: null + name: + description: ["The name of the alert."] + required: true + message: + description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events."] + required: false + default: null + silenced: + description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "] + required: false + default: "" + notify_no_data: + description: ["A boolean indicating whether this monitor will notify when data stops reporting.."] + required: false + default: False + no_data_timeframe: + description: ["The number of minutes before a monitor will notify when data stops reporting. 
Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks."] + required: false + default: 2x timeframe for metric, 2 minutes for service + timeout_h: + description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."] + required: false + default: null + renotify_interval: + description: ["The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's not resolved."] + required: false + default: null + escalation_message: + description: ["A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval is None"] + required: false + default: null + notify_audit: + description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."] + required: false + default: False + thresholds: + description: ["A dictionary of thresholds by status. Because service checks can have multiple thresholds, we don't define them directly in the query."] + required: false + default: {'ok': 1, 'critical': 1, 'warning': 1} +''' + +EXAMPLES = ''' +# Create a metric monitor +datadog_monitor: + type: "metric alert" + name: "Test monitor" + state: "present" + query: "datadog.agent.up".over("host:host1").last(2).count_by_status()" + message: "Some message." 
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Deletes a monitor +datadog_monitor: + name: "Test monitor" + state: "absent" + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Mutes a monitor +datadog_monitor: + name: "Test monitor" + state: "mute" + silenced: '{"*":None}' + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Unmutes a monitor +datadog_monitor: + name: "Test monitor" + state: "unmute" + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" +''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True), + app_key=dict(required=True), + state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']), + type=dict(required=False, choices=['metric alert', 'service check']), + name=dict(required=True), + query=dict(required=False), + message=dict(required=False, default=None), + silenced=dict(required=False, default=None, type='dict'), + notify_no_data=dict(required=False, default=False, choices=BOOLEANS), + no_data_timeframe=dict(required=False, default=None), + timeout_h=dict(required=False, default=None), + renotify_interval=dict(required=False, default=None), + escalation_message=dict(required=False, default=None), + notify_audit=dict(required=False, default=False, choices=BOOLEANS), + thresholds=dict(required=False, type='dict', default={'ok': 1, 'critical': 1, 'warning': 1}), + ) + ) + + # Prepare Datadog + if not HAS_DATADOG: + module.fail_json(msg='datadogpy required for this module') + + options = { + 'api_key': module.params['api_key'], + 'app_key': module.params['app_key'] + } + + initialize(**options) + + if module.params['state'] == 'present': + install_monitor(module) + elif module.params['state'] == 'absent': + delete_monitor(module) + elif module.params['state'] == 'mute': + 
mute_monitor(module) + elif module.params['state'] == 'unmute': + unmute_monitor(module) + + +def _get_monitor(module): + for monitor in api.Monitor.get_all(): + if monitor['name'] == module.params['name']: + return monitor + return {} + + +def _post_monitor(module, options): + try: + msg = api.Monitor.create(type=module.params['type'], query=module.params['query'], + name=module.params['name'], message=module.params['message'], + options=options) + if 'errors' in msg: + module.fail_json(msg=str(msg['errors'])) + else: + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + +def _equal_dicts(a, b, ignore_keys): + ka = set(a).difference(ignore_keys) + kb = set(b).difference(ignore_keys) + return ka == kb and all(a[k] == b[k] for k in ka) + +def _update_monitor(module, monitor, options): + try: + msg = api.Monitor.update(id=monitor['id'], query=module.params['query'], + name=module.params['name'], message=module.params['message'], + options=options) + if 'errors' in msg: + module.fail_json(msg=str(msg['errors'])) + elif _equal_dicts(msg, monitor, ['creator', 'overall_state']): + module.exit_json(changed=False, msg=msg) + else: + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +def install_monitor(module): + options = { + "silenced": module.params['silenced'], + "notify_no_data": module.boolean(module.params['notify_no_data']), + "no_data_timeframe": module.params['no_data_timeframe'], + "timeout_h": module.params['timeout_h'], + "renotify_interval": module.params['renotify_interval'], + "escalation_message": module.params['escalation_message'], + "notify_audit": module.boolean(module.params['notify_audit']), + } + + if module.params['type'] == "service check": + options["thresholds"] = module.params['thresholds'] + + monitor = _get_monitor(module) + if not monitor: + _post_monitor(module, options) + else: + _update_monitor(module, monitor, options) + + +def 
delete_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.exit_json(changed=False) + try: + msg = api.Monitor.delete(monitor['id']) + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +def mute_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.fail_json(msg="Monitor %s not found!" % module.params['name']) + elif monitor['options']['silenced']: + module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.") + elif (module.params['silenced'] is not None + and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0): + module.exit_json(changed=False) + try: + if module.params['silenced'] is None or module.params['silenced'] == "": + msg = api.Monitor.mute(id=monitor['id']) + else: + msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced']) + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +def unmute_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.fail_json(msg="Monitor %s not found!" % module.params['name']) + elif not monitor['options']['silenced']: + module.exit_json(changed=False) + try: + msg = api.Monitor.unmute(monitor['id']) + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() diff --git a/monitoring/librato_annotation.py b/monitoring/librato_annotation.py index 63979f41bfb..c606dfdc9a0 100644 --- a/monitoring/librato_annotation.py +++ b/monitoring/librato_annotation.py @@ -29,9 +29,8 @@ short_description: create an annotation in librato description: - Create an annotation event on the given annotation stream :name. 
If the annotation stream does not exist, it will be created automatically version_added: "1.6" -author: Seth Edwards +author: "Seth Edwards (@sedward)" requirements: - - urllib2 - base64 options: user: @@ -107,11 +106,7 @@ EXAMPLES = ''' ''' -try: - import urllib2 - HAS_URLLIB2 = True -except ImportError: - HAS_URLLIB2 = False +import urllib2 def post_annotation(module): user = module.params['user'] @@ -138,11 +133,11 @@ def post_annotation(module): headers = {} headers['Content-Type'] = 'application/json' - headers['Authorization'] = b"Basic " + base64.b64encode(user + b":" + api_key).strip() + headers['Authorization'] = "Basic " + base64.b64encode(user + ":" + api_key).strip() req = urllib2.Request(url, json_body, headers) try: response = urllib2.urlopen(req) - except urllib2.HTTPError as e: + except urllib2.HTTPError, e: module.fail_json(msg="Request Failed", reason=e.reason) response = response.read() module.exit_json(changed=True, annotation=response) diff --git a/monitoring/logentries.py b/monitoring/logentries.py index a19885ea702..a347afd84c2 100644 --- a/monitoring/logentries.py +++ b/monitoring/logentries.py @@ -19,8 +19,8 @@ DOCUMENTATION = ''' --- module: logentries -author: Ivan Vanderbyl -short_description: Module for tracking logs via logentries.com +author: "Ivan Vanderbyl (@ivanvanderbyl)" +short_description: Module for tracking logs via logentries.com description: - Sends logs to LogEntries in realtime version_added: "1.6" diff --git a/monitoring/monit.py b/monitoring/monit.py index 8772d22b2d8..3d3c7c8c3ca 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -39,7 +39,7 @@ options: default: null choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ] requirements: [ ] -author: Darryl Stoflet +author: "Darryl Stoflet (@dstoflet)" ''' EXAMPLES = ''' @@ -77,7 +77,7 @@ def main(): # Process 'name' Running - restart pending parts = line.split() if len(parts) > 2 and parts[0].lower() == 'process' and 
parts[1] == "'%s'" % name: - return ' '.join(parts[2:]) + return ' '.join(parts[2:]).lower() else: return '' @@ -86,7 +86,8 @@ def main(): module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True) return status() - present = status() != '' + process_status = status() + present = process_status != '' if not present and not state == 'present': module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state) @@ -102,7 +103,7 @@ def main(): module.exit_json(changed=True, name=name, state=state) module.exit_json(changed=False, name=name, state=state) - running = 'running' in status() + running = 'running' in process_status if running and state in ['started', 'monitored']: module.exit_json(changed=False, name=name, state=state) @@ -119,7 +120,7 @@ def main(): if module.check_mode: module.exit_json(changed=True) status = run_command('unmonitor') - if status in ['not monitored']: + if status in ['not monitored'] or 'unmonitor pending' in status: module.exit_json(changed=True, name=name, state=state) module.fail_json(msg='%s process not unmonitored' % name, status=status) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index c564e712b04..16edca2aa6a 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -9,7 +9,7 @@ # Tim Bielawa # # This software may be freely redistributed under the terms of the GNU -# general public license version 2. +# general public license version 2 or any later version. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . @@ -30,10 +30,12 @@ options: action: description: - Action to take. + - servicegroup options were added in 2.0. 
required: true default: null choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", - "silence_nagios", "unsilence_nagios", "command" ] + "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime", + "servicegroup_host_downtime" ] host: description: - Host to operate on in Nagios. @@ -51,6 +53,12 @@ options: Only usable with the C(downtime) action. required: false default: Ansible + comment: + version_added: "2.0" + description: + - Comment for C(downtime) action. + required: false + default: Scheduling downtime minutes: description: - Minutes to schedule downtime for. @@ -65,6 +73,11 @@ options: aliases: [ "service" ] required: true default: null + servicegroup: + version_added: "2.0" + description: + - the Servicegroup we want to set downtimes/alerts for. + B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime). command: description: - The raw command to send to nagios, which @@ -73,7 +86,7 @@ options: required: true default: null -author: Tim Bielawa +author: "Tim Bielawa (@tbielawa)" requirements: [ "Nagios" ] ''' @@ -84,12 +97,22 @@ EXAMPLES = ''' # schedule an hour of HOST downtime - nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} +# schedule an hour of HOST downtime, with a comment describing the reason +- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} + comment='This host needs disciplined' + # schedule downtime for ALL services on HOST - nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }} # schedule downtime for a few services - nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }} +# set 30 minutes downtime for all services in servicegroup foo +- nagios: action=servicegroup_service_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }} + +# set 30 minutes downtime for all hosts in servicegroup foo +- nagios: action=servicegroup_host_downtime 
minutes=30 servicegroup=foo host={{ inventory_hostname }} + # enable SMART disk alerts - nagios: action=enable_alerts service=smart host={{ inventory_hostname }} @@ -169,13 +192,18 @@ def main(): 'silence_nagios', 'unsilence_nagios', 'command', + 'servicegroup_host_downtime', + 'servicegroup_service_downtime', ] + module = AnsibleModule( argument_spec=dict( action=dict(required=True, default=None, choices=ACTION_CHOICES), author=dict(default='Ansible'), + comment=dict(default='Scheduling downtime'), host=dict(required=False, default=None), + servicegroup=dict(required=False, default=None), minutes=dict(default=30), cmdfile=dict(default=which_cmdfile()), services=dict(default=None, aliases=['service']), @@ -185,11 +213,12 @@ def main(): action = module.params['action'] host = module.params['host'] + servicegroup = module.params['servicegroup'] minutes = module.params['minutes'] services = module.params['services'] cmdfile = module.params['cmdfile'] command = module.params['command'] - + ################################################################## # Required args per action: # downtime = (minutes, service, host) @@ -217,6 +246,20 @@ def main(): except Exception: module.fail_json(msg='invalid entry for minutes') + ###################################################################### + + if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']: + # Make sure there's an actual servicegroup selected + if not servicegroup: + module.fail_json(msg='no servicegroup selected to set downtime for') + # Make sure minutes is a number + try: + m = int(minutes) + if not isinstance(m, types.IntType): + module.fail_json(msg='minutes must be a number') + except Exception: + module.fail_json(msg='invalid entry for minutes') + ################################################################## if action in ['enable_alerts', 'disable_alerts']: if not services: @@ -258,7 +301,9 @@ class Nagios(object): self.module = module self.action = kwargs['action'] 
self.author = kwargs['author'] + self.comment = kwargs['comment'] self.host = kwargs['host'] + self.servicegroup = kwargs['servicegroup'] self.minutes = int(kwargs['minutes']) self.cmdfile = kwargs['cmdfile'] self.command = kwargs['command'] @@ -293,7 +338,7 @@ class Nagios(object): cmdfile=self.cmdfile) def _fmt_dt_str(self, cmd, host, duration, author=None, - comment="Scheduling downtime", start=None, + comment=None, start=None, svc=None, fixed=1, trigger=0): """ Format an external-command downtime string. @@ -326,6 +371,9 @@ class Nagios(object): if not author: author = self.author + if not comment: + comment = self.comment + if svc is not None: dt_args = [svc, str(start), str(end), str(fixed), str(trigger), str(duration_s), author, comment] @@ -356,7 +404,7 @@ class Nagios(object): notif_str = "[%s] %s" % (entry_time, cmd) if host is not None: notif_str += ";%s" % host - + if svc is not None: notif_str += ";%s" % svc @@ -796,42 +844,42 @@ class Nagios(object): return return_str_list else: return "Fail: could not write to the command file" - + def silence_nagios(self): """ This command is used to disable notifications for all hosts and services in nagios. - + This is a 'SHUT UP, NAGIOS' command """ cmd = 'DISABLE_NOTIFICATIONS' self._write_command(self._fmt_notif_str(cmd)) - + def unsilence_nagios(self): """ This command is used to enable notifications for all hosts and services in nagios. 
- + This is a 'OK, NAGIOS, GO'' command """ cmd = 'ENABLE_NOTIFICATIONS' self._write_command(self._fmt_notif_str(cmd)) - + def nagios_cmd(self, cmd): """ This sends an arbitrary command to nagios - + It prepends the submitted time and appends a \n - + You just have to provide the properly formatted command """ - + pre = '[%s]' % int(time.time()) - + post = '\n' cmdstr = '%s %s %s' % (pre, cmd, post) self._write_command(cmdstr) - + def act(self): """ Figure out what you want to do from ansible, and then do the @@ -847,6 +895,12 @@ class Nagios(object): self.schedule_svc_downtime(self.host, services=self.services, minutes=self.minutes) + elif self.action == "servicegroup_host_downtime": + if self.servicegroup: + self.schedule_servicegroup_host_downtime(servicegroup = self.servicegroup, minutes = self.minutes) + elif self.action == "servicegroup_service_downtime": + if self.servicegroup: + self.schedule_servicegroup_svc_downtime(servicegroup = self.servicegroup, minutes = self.minutes) # toggle the host AND service alerts elif self.action == 'silence': @@ -871,13 +925,13 @@ class Nagios(object): services=self.services) elif self.action == 'silence_nagios': self.silence_nagios() - + elif self.action == 'unsilence_nagios': self.unsilence_nagios() - + elif self.action == 'command': self.nagios_cmd(self.command) - + # wtf? 
else: self.module.fail_json(msg="unknown action specified: '%s'" % \ diff --git a/monitoring/newrelic_deployment.py b/monitoring/newrelic_deployment.py index 93d55832fd3..3d9bc6c0ec3 100644 --- a/monitoring/newrelic_deployment.py +++ b/monitoring/newrelic_deployment.py @@ -22,14 +22,14 @@ DOCUMENTATION = ''' --- module: newrelic_deployment version_added: "1.2" -author: Matt Coddington +author: "Matt Coddington (@mcodd)" short_description: Notify newrelic about app deployments description: - - Notify newrelic about app deployments (see http://newrelic.github.io/newrelic_api/NewRelicApi/Deployment.html) + - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api) options: token: description: - - API token. + - API token, to place in the x-api-key header. required: true app_name: description: @@ -72,8 +72,7 @@ options: choices: ['yes', 'no'] version_added: 1.5.1 -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] +requirements: [] ''' EXAMPLES = ''' @@ -83,6 +82,8 @@ EXAMPLES = ''' revision=1.0 ''' +import urllib + # =========================================== # Module execution. 
# @@ -102,6 +103,7 @@ def main(): environment=dict(required=False), validate_certs = dict(default='yes', type='bool'), ), + required_one_of=[['app_name', 'application_id']], supports_check_mode=True ) diff --git a/monitoring/pagerduty.py b/monitoring/pagerduty.py index aa6903414dd..24c622c83a8 100644 --- a/monitoring/pagerduty.py +++ b/monitoring/pagerduty.py @@ -7,7 +7,10 @@ short_description: Create PagerDuty maintenance windows description: - This module will let you create PagerDuty maintenance windows version_added: "1.2" -author: Justin Johns +author: + - "Andrew Newdigate (@suprememoocow)" + - "Dylan Silva (@thaumos)" + - "Justin Johns" requirements: - PagerDuty API access options: diff --git a/monitoring/pingdom.py b/monitoring/pingdom.py index 6f658cd9505..fd06a1217cb 100644 --- a/monitoring/pingdom.py +++ b/monitoring/pingdom.py @@ -7,7 +7,9 @@ short_description: Pause/unpause Pingdom alerts description: - This module will let you pause/unpause Pingdom alerts version_added: "1.2" -author: Justin Johns +author: + - "Dylan Silva (@thaumos)" + - "Justin Johns" requirements: - "This pingdom python library: https://github.com/mbabineau/pingdom-python" options: @@ -111,7 +113,7 @@ def main(): ) if not HAS_PINGDOM: - module.fail_json(msg="Missing requried pingdom module (check docs)") + module.fail_json(msg="Missing required pingdom module (check docs)") checkid = module.params['checkid'] state = module.params['state'] diff --git a/monitoring/rollbar_deployment.py b/monitoring/rollbar_deployment.py index 772e78fc5c2..060193b78a5 100644 --- a/monitoring/rollbar_deployment.py +++ b/monitoring/rollbar_deployment.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: rollbar_deployment version_added: 1.6 -author: Max Riveiro +author: "Max Riveiro (@kavu)" short_description: Notify Rollbar about app deployments description: - Notify Rollbar about app deployments @@ -76,6 +76,7 @@ EXAMPLES = ''' comment='Test Deploy' ''' +import urllib def main(): diff --git 
a/monitoring/sensu_check.py b/monitoring/sensu_check.py new file mode 100644 index 00000000000..a1bd36ca665 --- /dev/null +++ b/monitoring/sensu_check.py @@ -0,0 +1,336 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Anders Ingemann +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: sensu_check +short_description: Manage Sensu checks +version_added: 2.0 +description: + - Manage the checks that should be run on a machine by I(Sensu). + - Most options do not have a default and will not be added to the check definition unless specified. + - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module, + - they are simply specified for your convenience. +options: + name: + description: + - The name of the check + - This is the key that is used to determine whether a check exists + required: true + state: + description: Whether the check should be present or not + choices: [ 'present', 'absent' ] + required: false + default: present + path: + description: + - Path to the json file of the check to be added/removed. + - Will be created if it does not exist (unless I(state=absent)). 
+ - The parent folders need to exist when I(state=present), otherwise an error will be thrown + required: false + default: /etc/sensu/conf.d/checks.json + backup: + description: + - Create a backup file (if yes), including the timestamp information so + - you can get the original file back if you somehow clobbered it incorrectly. + choices: [ 'yes', 'no' ] + required: false + default: no + command: + description: + - Path to the sensu check to run (not required when I(state=absent)) + required: true + handlers: + description: + - List of handlers to notify when the check fails + required: false + default: [] + subscribers: + description: + - List of subscribers/channels this check should run for + - See sensu_subscribers to subscribe a machine to a channel + required: false + default: [] + interval: + description: + - Check interval in seconds + required: false + default: null + timeout: + description: + - Timeout for the check + required: false + default: 10 + handle: + description: + - Whether the check should be handled or not + choices: [ 'yes', 'no' ] + required: false + default: yes + subdue_begin: + description: + - When to disable handling of check failures + required: false + default: null + subdue_end: + description: + - When to enable handling of check failures + required: false + default: null + dependencies: + description: + - Other checks this check depends on, if dependencies fail, + - handling of this check will be disabled + required: false + default: [] + metric: + description: Whether the check is a metric + choices: [ 'yes', 'no' ] + required: false + default: no + standalone: + description: + - Whether the check should be scheduled by the sensu client or server + - This option obviates the need for specifying the I(subscribers) option + choices: [ 'yes', 'no' ] + required: false + default: no + publish: + description: + - Whether the check should be scheduled at all. 
+ - You can still issue it via the sensu api + choices: [ 'yes', 'no' ] + required: false + default: yes + occurrences: + description: + - Number of event occurrences before the handler should take action + required: false + default: 1 + refresh: + description: + - Number of seconds handlers should wait before taking second action + required: false + default: null + aggregate: + description: + - Classifies the check as an aggregate check, + - making it available via the aggregate API + choices: [ 'yes', 'no' ] + required: false + default: no + low_flap_threshold: + description: + - The low threshold for flap detection + required: false + default: null + high_flap_threshold: + description: + - The high threshold for flap detection + required: false + default: null +requirements: [ ] +author: Anders Ingemann +''' + +EXAMPLES = ''' +# Fetch metrics about the CPU load every 60 seconds, +# the sensu server has a handler called 'relay' which forwards stats to graphite +- name: get cpu metrics + sensu_check: name=cpu_load + command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb + metric=yes handlers=relay subscribers=common interval=60 + +# Check whether nginx is running +- name: check nginx process + sensu_check: name=nginx_running + command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid' + handlers=default subscribers=nginx interval=60 + +# Stop monitoring the disk capacity. +# Note that the check will still show up in the sensu dashboard, +# to remove it completely you need to issue a DELETE request to the sensu api. 
+- name: check disk + sensu_check: name=check_disk_capacity +''' + + +def sensu_check(module, path, name, state='present', backup=False): + changed = False + reasons = [] + + try: + import json + except ImportError: + import simplejson as json + + try: + try: + stream = open(path, 'r') + config = json.load(stream.read()) + except IOError, e: + if e.errno is 2: # File not found, non-fatal + if state == 'absent': + reasons.append('file did not exist and state is `absent\'') + return changed, reasons + config = {} + else: + module.fail_json(msg=str(e)) + except ValueError: + msg = '{path} contains invalid JSON'.format(path=path) + module.fail_json(msg=msg) + finally: + if stream: + stream.close() + + if 'checks' not in config: + if state == 'absent': + reasons.append('`checks\' section did not exist and state is `absent\'') + return changed, reasons + config['checks'] = {} + changed = True + reasons.append('`checks\' section did not exist') + + if state == 'absent': + if name in config['checks']: + del config['checks'][name] + changed = True + reasons.append('check was present and state is `absent\'') + + if state == 'present': + if name not in config['checks']: + check = {} + config['checks'][name] = check + changed = True + reasons.append('check was absent and state is `present\'') + else: + check = config['checks'][name] + simple_opts = ['command', + 'handlers', + 'subscribers', + 'interval', + 'timeout', + 'handle', + 'dependencies', + 'standalone', + 'publish', + 'occurrences', + 'refresh', + 'aggregate', + 'low_flap_threshold', + 'high_flap_threshold', + ] + for opt in simple_opts: + if module.params[opt] is not None: + if opt not in check or check[opt] != module.params[opt]: + check[opt] = module.params[opt] + changed = True + reasons.append('`{opt}\' did not exist or was different'.format(opt=opt)) + else: + if opt in check: + del check[opt] + changed = True + reasons.append('`{opt}\' was removed'.format(opt=opt)) + + if module.params['metric']: + if 'type' 
not in check or check['type'] != 'metric': + check['type'] = 'metric' + changed = True + reasons.append('`type\' was not defined or not `metric\'') + if not module.params['metric'] and 'type' in check: + del check['type'] + changed = True + reasons.append('`type\' was defined') + + if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None: + subdue = {'begin': module.params['subdue_begin'], + 'end': module.params['subdue_end'], + } + if 'subdue' not in check or check['subdue'] != subdue: + check['subdue'] = subdue + changed = True + reasons.append('`subdue\' did not exist or was different') + else: + if 'subdue' in check: + del check['subdue'] + changed = True + reasons.append('`subdue\' was removed') + + if changed and not module.check_mode: + if backup: + module.backup_local(path) + try: + try: + stream = open(path, 'w') + stream.write(json.dumps(config, indent=2) + '\n') + except IOError, e: + module.fail_json(msg=str(e)) + finally: + if stream: + stream.close() + + return changed, reasons + + +def main(): + + arg_spec = {'name': {'type': 'str', 'required': True}, + 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'}, + 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, + 'backup': {'type': 'bool', 'default': 'no'}, + 'command': {'type': 'str'}, + 'handlers': {'type': 'list'}, + 'subscribers': {'type': 'list'}, + 'interval': {'type': 'int'}, + 'timeout': {'type': 'int'}, + 'handle': {'type': 'bool'}, + 'subdue_begin': {'type': 'str'}, + 'subdue_end': {'type': 'str'}, + 'dependencies': {'type': 'list'}, + 'metric': {'type': 'bool', 'default': 'no'}, + 'standalone': {'type': 'bool'}, + 'publish': {'type': 'bool'}, + 'occurrences': {'type': 'int'}, + 'refresh': {'type': 'int'}, + 'aggregate': {'type': 'bool'}, + 'low_flap_threshold': {'type': 'int'}, + 'high_flap_threshold': {'type': 'int'}, + } + + required_together = [['subdue_begin', 'subdue_end']] + + module = 
AnsibleModule(argument_spec=arg_spec, + required_together=required_together, + supports_check_mode=True) + if module.params['state'] != 'absent' and module.params['command'] is None: + module.fail_json(msg="missing required arguments: %s" % ",".join(['command'])) + + path = module.params['path'] + name = module.params['name'] + state = module.params['state'] + backup = module.params['backup'] + + changed, reasons = sensu_check(module, path, name, state, backup) + + module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons) + +from ansible.module_utils.basic import * +main() diff --git a/monitoring/stackdriver.py b/monitoring/stackdriver.py index c36964dd9d2..570e6659ac0 100644 --- a/monitoring/stackdriver.py +++ b/monitoring/stackdriver.py @@ -8,7 +8,7 @@ short_description: Send code deploy and annotation events to stackdriver description: - Send code deploy and annotation events to Stackdriver version_added: "1.6" -author: Ben Whaley +author: "Ben Whaley (@bwhaley)" options: key: description: diff --git a/monitoring/uptimerobot.py b/monitoring/uptimerobot.py index 889d144c9b3..6d5c9c7bac0 100644 --- a/monitoring/uptimerobot.py +++ b/monitoring/uptimerobot.py @@ -6,7 +6,7 @@ module: uptimerobot short_description: Pause and start Uptime Robot monitoring description: - This module will let you start and pause Uptime Robot Monitoring -author: Nate Kingsley +author: "Nate Kingsley (@nate-kingsley)" version_added: "1.9" requirements: - Valid Uptime Robot API Key diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py index 489a8617f54..4aad1218789 100644 --- a/monitoring/zabbix_group.py +++ b/monitoring/zabbix_group.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- - -# (c) 2014, René Moser +# +# (c) 2013-2014, Epic Games, Inc. # # This file is part of Ansible # @@ -22,191 +22,188 @@ DOCUMENTATION = ''' --- module: zabbix_group -short_description: Add or remove a host group to Zabbix. 
+short_description: Zabbix host groups creates/deletes description: - - This module uses the Zabbix API to add and remove host groups. -version_added: '1.8' -requirements: [ 'zabbix-api' ] + - Create host groups if they do not exist. + - Delete existing host groups if they exist. +version_added: "1.8" +author: + - "(@cove)" + - "Tony Minfei Ding" + - "Harrison Gu (@harrisongu)" +requirements: + - "python >= 2.6" + - zabbix-api options: - state: - description: - - Whether the host group should be added or removed. - required: false - default: present - choices: [ 'present', 'absent' ] - host_group: - description: - - Name of the host group to be added or removed. - required: true - default: null - aliases: [ ] server_url: description: - - Url of Zabbix server, with protocol (http or https) e.g. - https://monitoring.example.com/zabbix. C(url) is an alias - for C(server_url). If not set environment variable - C(ZABBIX_SERVER_URL) is used. + - Url of Zabbix server, with protocol (http or https). + C(url) is an alias for C(server_url). required: true - default: null - aliases: [ 'url' ] + aliases: [ "url" ] login_user: description: - - Zabbix user name. If not set environment variable - C(ZABBIX_LOGIN_USER) is used. + - Zabbix user name. required: true - default: null login_password: description: - - Zabbix user password. If not set environment variable - C(ZABBIX_LOGIN_PASSWORD) is used. + - Zabbix user password. required: true + state: + description: + - Create or delete host group. + required: false + default: "present" + choices: [ "present", "absent" ] + timeout: + description: + - The timeout of API request(seconds). + default: 10 + host_groups: + description: + - List of host groups to create or delete. + required: true + aliases: [ "host_group" ] notes: - - The module has been tested with Zabbix Server 2.2. -author: René Moser + - Too many concurrent updates to the same group may cause Zabbix to return errors, see examples for a workaround if needed. 
''' EXAMPLES = ''' ---- -# Add a new host group to Zabbix -- zabbix_group: host_group='Linux servers' - server_url=https://monitoring.example.com/zabbix - login_user=ansible - login_password=secure - -# Add a new host group, login data is provided by environment variables: -# ZABBIX_LOGIN_USER, ZABBIX_LOGIN_PASSWORD, ZABBIX_SERVER_URL: -- zabbix_group: host_group=Webservers - -# Remove a host group from Zabbix -- zabbix_group: host_group='Linux servers' - state=absent - server_url=https://monitoring.example.com/zabbix - login_user=ansible - login_password=secure +# Base create host groups example +- name: Create host groups + local_action: + module: zabbix_group + server_url: http://monitor.example.com + login_user: username + login_password: password + state: present + host_groups: + - Example group1 + - Example group2 + +# Limit the Zabbix group creations to one host since Zabbix can return an error when doing concurrent updates +- name: Create host groups + local_action: + module: zabbix_group + server_url: http://monitor.example.com + login_user: username + login_password: password + state: present + host_groups: + - Example group1 + - Example group2 + when: inventory_hostname==groups['group_name'][0] ''' try: - from zabbix_api import ZabbixAPI + from zabbix_api import ZabbixAPI, ZabbixAPISubClass + from zabbix_api import Already_Exists + HAS_ZABBIX_API = True except ImportError: HAS_ZABBIX_API = False -def create_group(zbx, host_group): - try: - result = zbx.hostgroup.create( - { - 'name': host_group - } - ) - except BaseException as e: - return 1, None, str(e) - return 0, result['groupids'], None - - -def get_group(zbx, host_group): - try: - result = zbx.hostgroup.get( - { - 'filter': - { - 'name': host_group, - } - } - ) - except BaseException as e: - return 1, None, str(e) - - return 0, result[0]['groupid'], None - - -def delete_group(zbx, group_id): - try: - zbx.hostgroup.delete([ group_id ]) - except BaseException as e: - return 1, None, str(e) - return 0, 
None, None - - -def check_group(zbx, host_group): - try: - result = zbx.hostgroup.exists( - { - 'name': host_group - } - ) - except BaseException as e: - return 1, None, str(e) - return 0, result, None +class HostGroup(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # create host group(s) if not exists + def create_host_group(self, group_names): + try: + group_add_list = [] + for group_name in group_names: + result = self._zapi.hostgroup.exists({'name': group_name}) + if not result: + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.hostgroup.create({'name': group_name}) + group_add_list.append(group_name) + except Already_Exists: + return group_add_list + return group_add_list + except Exception, e: + self._module.fail_json(msg="Failed to create host group(s): %s" % e) + + # delete host group(s) + def delete_host_group(self, group_ids): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.hostgroup.delete(group_ids) + except Exception, e: + self._module.fail_json(msg="Failed to delete host group(s), Exception: %s" % e) + + # get group ids by name + def get_group_ids(self, host_groups): + group_ids = [] + + group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': host_groups}}) + for group in group_list: + group_id = group['groupid'] + group_ids.append(group_id) + return group_ids, group_list def main(): module = AnsibleModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'absent']), - host_group=dict(required=True, default=None), - server_url=dict(default=None, aliases=['url']), - login_user=dict(default=None), - login_password=dict(default=None), + server_url=dict(required=True, aliases=['url']), + login_user=dict(required=True), + login_password=dict(required=True, no_log=True), + host_groups=dict(required=True, aliases=['host_group']), + state=dict(default="present", choices=['present','absent']), + 
timeout=dict(type='int', default=10) ), - supports_check_mode=True, + supports_check_mode=True ) if not HAS_ZABBIX_API: - module.fail_json(msg='Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)') + module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") - try: - login_user = module.params['login_user'] or os.environ['ZABBIX_LOGIN_USER'] - login_password = module.params['login_password'] or os.environ['ZABBIX_LOGIN_PASSWORD'] - server_url = module.params['server_url'] or os.environ['ZABBIX_SERVER_URL'] - except KeyError, e: - module.fail_json(msg='Missing login data: %s is not set.' % e.message) - - host_group = module.params['host_group'] + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + host_groups = module.params['host_groups'] state = module.params['state'] + timeout = module.params['timeout'] + + zbx = None + # login to zabbix try: - zbx = ZabbixAPI(server_url) + zbx = ZabbixAPI(server_url, timeout=timeout) zbx.login(login_user, login_password) - except BaseException as e: - module.fail_json(msg='Failed to connect to Zabbix server: %s' % e) - - changed = False - msg = '' - - if state == 'present': - (rc, exists, error) = check_group(zbx, host_group) - if rc != 0: - module.fail_json(msg='Failed to check host group %s existance: %s' % (host_group, error)) - if not exists: - if module.check_mode: - changed = True - else: - (rc, group, error) = create_group(zbx, host_group) - if rc == 0: - changed = True - else: - module.fail_json(msg='Failed to get host group: %s' % error) - - if state == 'absent': - (rc, exists, error) = check_group(zbx, host_group) - if rc != 0: - module.fail_json(msg='Failed to check host group %s existance: %s' % (host_group, error)) - if exists: - if module.check_mode: - changed = True - else: - (rc, group_id, error) = get_group(zbx, host_group) - if rc != 0: - 
module.fail_json(msg='Failed to get host group: %s' % error) - - (rc, _, error) = delete_group(zbx, group_id) - if rc == 0: - changed = True - else: - module.fail_json(msg='Failed to remove host group: %s' % error) - - module.exit_json(changed=changed) + except Exception, e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + hostGroup = HostGroup(module, zbx) + + group_ids = [] + group_list = [] + if host_groups: + group_ids, group_list = hostGroup.get_group_ids(host_groups) + + if state == "absent": + # delete host groups + if group_ids: + delete_group_names = [] + hostGroup.delete_host_group(group_ids) + for group in group_list: + delete_group_names.append(group['name']) + module.exit_json(changed=True, + result="Successfully deleted host group(s): %s." % ",".join(delete_group_names)) + else: + module.exit_json(changed=False, result="No host group(s) to delete.") + else: + # create host groups + group_add_list = hostGroup.create_host_group(host_groups) + if len(group_add_list) > 0: + module.exit_json(changed=True, result="Successfully created host group(s): %s" % group_add_list) + else: + module.exit_json(changed=False) from ansible.module_utils.basic import * main() diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py new file mode 100644 index 00000000000..6fac82c7177 --- /dev/null +++ b/monitoring/zabbix_host.py @@ -0,0 +1,485 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013-2014, Epic Games, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: zabbix_host +short_description: Zabbix host creates/updates/deletes +description: + - This module allows you to create, modify and delete Zabbix host entries and associated group and template data. +version_added: "2.0" +author: + - "(@cove)" + - "Tony Minfei Ding" + - "Harrison Gu (@harrisongu)" +requirements: + - "python >= 2.6" + - zabbix-api +options: + server_url: + description: + - Url of Zabbix server, with protocol (http or https). + required: true + aliases: [ "url" ] + login_user: + description: + - Zabbix user name, used to authenticate against the server. + required: true + login_password: + description: + - Zabbix user password. + required: true + host_name: + description: + - Name of the host in Zabbix. + - host_name is the unique identifier used and cannot be updated using this module. + required: true + host_groups: + description: + - List of host groups the host is part of. + required: false + link_templates: + description: + - List of templates linked to the host. + required: false + default: None + status: + description: + - Monitoring status of the host. + required: false + choices: ['enabled', 'disabled'] + default: "enabled" + state: + description: + - State of the host. + - On C(present), it will create if host does not exist or update the host if the associated data is different. + - On C(absent) will remove a host if it exists. + required: false + choices: ['present', 'absent'] + default: "present" + timeout: + description: + - The timeout of API request (seconds). + default: 10 + proxy: + description: + - The name of the Zabbix Proxy to be used + default: None + interfaces: + description: + - List of interfaces to be created for the host (see example below). + - 'Available values are: dns, ip, main, port, type and useip.' 
+ - Please review the interface documentation for more information on the supported properties + - 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface' + required: false + default: [] +''' + +EXAMPLES = ''' +- name: Create a new host or update an existing host's info + local_action: + module: zabbix_host + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + host_groups: + - Example group1 + - Example group2 + link_templates: + - Example template1 + - Example template2 + status: enabled + state: present + interfaces: + - type: 1 + main: 1 + useip: 1 + ip: 10.xx.xx.xx + dns: "" + port: 10050 + - type: 4 + main: 1 + useip: 1 + ip: 10.xx.xx.xx + dns: "" + port: 12345 + proxy: a.zabbix.proxy +''' + +import logging +import copy + +try: + from zabbix_api import ZabbixAPI, ZabbixAPISubClass + + HAS_ZABBIX_API = True +except ImportError: + HAS_ZABBIX_API = False + + +# Extend the ZabbixAPI +# Since the zabbix-api python module too old (version 1.0, no higher version so far), +# it does not support the 'hostinterface' api calls, +# so we have to inherit the ZabbixAPI class to add 'hostinterface' support. 
+class ZabbixAPIExtends(ZabbixAPI): + hostinterface = None + + def __init__(self, server, timeout, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout) + self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs)) + + +class Host(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # exist host + def is_host_exist(self, host_name): + result = self._zapi.host.exists({'host': host_name}) + return result + + # check if host group exists + def check_host_group_exist(self, group_names): + for group_name in group_names: + result = self._zapi.hostgroup.exists({'name': group_name}) + if not result: + self._module.fail_json(msg="Hostgroup not found: %s" % group_name) + return True + + def get_template_ids(self, template_list): + template_ids = [] + if template_list is None or len(template_list) == 0: + return template_ids + for template in template_list: + template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}}) + if len(template_list) < 1: + self._module.fail_json(msg="Template not found: %s" % template) + else: + template_id = template_list[0]['templateid'] + template_ids.append(template_id) + return template_ids + + def add_host(self, host_name, group_ids, status, interfaces, proxy_id): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status} + if proxy_id: + parameters['proxy_hostid'] = proxy_id + host_list = self._zapi.host.create(parameters) + if len(host_list) >= 1: + return host_list['hostids'][0] + except Exception, e: + self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e)) + + def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + parameters = {'hostid': host_id, 'groups': group_ids, 
'status': status, 'proxy_hostid': proxy_id} + self._zapi.host.update(parameters) + interface_list_copy = exist_interface_list + if interfaces: + for interface in interfaces: + flag = False + interface_str = interface + for exist_interface in exist_interface_list: + interface_type = interface['type'] + exist_interface_type = int(exist_interface['type']) + if interface_type == exist_interface_type: + # update + interface_str['interfaceid'] = exist_interface['interfaceid'] + self._zapi.hostinterface.update(interface_str) + flag = True + interface_list_copy.remove(exist_interface) + break + if not flag: + # add + interface_str['hostid'] = host_id + self._zapi.hostinterface.create(interface_str) + # remove + remove_interface_ids = [] + for remove_interface in interface_list_copy: + interface_id = remove_interface['interfaceid'] + remove_interface_ids.append(interface_id) + if len(remove_interface_ids) > 0: + self._zapi.hostinterface.delete(remove_interface_ids) + except Exception, e: + self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e)) + + def delete_host(self, host_id, host_name): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.delete({'hostid': host_id}) + except Exception, e: + self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e)) + + # get host by host name + def get_host_by_host_name(self, host_name): + host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}}) + if len(host_list) < 1: + self._module.fail_json(msg="Host not found: %s" % host_name) + else: + return host_list[0] + + # get proxyid by proxy name + def get_proxyid_by_proxy_name(self, proxy_name): + proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}}) + if len(proxy_list) < 1: + self._module.fail_json(msg="Proxy not found: %s" % proxy_name) + else: + return proxy_list[0]['proxyid'] + + # get group ids by group names + def 
get_group_ids_by_group_names(self, group_names): + group_ids = [] + if self.check_host_group_exist(group_names): + group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}}) + for group in group_list: + group_id = group['groupid'] + group_ids.append({'groupid': group_id}) + return group_ids + + # get host templates by host id + def get_host_templates_by_host_id(self, host_id): + template_ids = [] + template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id}) + for template in template_list: + template_ids.append(template['templateid']) + return template_ids + + # get host groups by host id + def get_host_groups_by_host_id(self, host_id): + exist_host_groups = [] + host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id}) + + if len(host_groups_list) >= 1: + for host_groups_name in host_groups_list: + exist_host_groups.append(host_groups_name['name']) + return exist_host_groups + + # check the exist_interfaces whether it equals the interfaces or not + def check_interface_properties(self, exist_interface_list, interfaces): + interfaces_port_list = [] + if len(interfaces) >= 1: + for interface in interfaces: + interfaces_port_list.append(int(interface['port'])) + + exist_interface_ports = [] + if len(exist_interface_list) >= 1: + for exist_interface in exist_interface_list: + exist_interface_ports.append(int(exist_interface['port'])) + + if set(interfaces_port_list) != set(exist_interface_ports): + return True + + for exist_interface in exist_interface_list: + exit_interface_port = int(exist_interface['port']) + for interface in interfaces: + interface_port = int(interface['port']) + if interface_port == exit_interface_port: + for key in interface.keys(): + if str(exist_interface[key]) != str(interface[key]): + return True + + return False + + # get the status of host by host + def get_host_status_by_host(self, host): + return host['status'] + + # check all the properties before link or 
clear template + def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids, + exist_interfaces, host, proxy_id): + # get the existing host's groups + exist_host_groups = self.get_host_groups_by_host_id(host_id) + if set(host_groups) != set(exist_host_groups): + return True + + # get the existing status + exist_status = self.get_host_status_by_host(host) + if int(status) != int(exist_status): + return True + + # check the exist_interfaces whether it equals the interfaces or not + if self.check_interface_properties(exist_interfaces, interfaces): + return True + + # get the existing templates + exist_template_ids = self.get_host_templates_by_host_id(host_id) + if set(list(template_ids)) != set(exist_template_ids): + return True + + if host['proxy_hostid'] != proxy_id: + return True + + return False + + # link or clear template of the host + def link_or_clear_template(self, host_id, template_id_list): + # get host's exist template ids + exist_template_id_list = self.get_host_templates_by_host_id(host_id) + + exist_template_ids = set(exist_template_id_list) + template_ids = set(template_id_list) + template_id_list = list(template_ids) + + # get unlink and clear templates + templates_clear = exist_template_ids.difference(template_ids) + templates_clear_list = list(templates_clear) + request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list} + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.update(request_str) + except Exception, e: + self._module.fail_json(msg="Failed to link template to host: %s" % e) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(required=True, aliases=['url']), + login_user=dict(required=True), + login_password=dict(required=True, no_log=True), + host_name=dict(required=True), + host_groups=dict(required=False), + link_templates=dict(required=False), + status=dict(default="enabled", 
choices=['enabled', 'disabled']), + state=dict(default="present", choices=['present', 'absent']), + timeout=dict(type='int', default=10), + interfaces=dict(required=False), + proxy=dict(required=False) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + host_name = module.params['host_name'] + host_groups = module.params['host_groups'] + link_templates = module.params['link_templates'] + status = module.params['status'] + state = module.params['state'] + timeout = module.params['timeout'] + interfaces = module.params['interfaces'] + proxy = module.params['proxy'] + + # convert enabled to 0; disabled to 1 + status = 1 if status == "disabled" else 0 + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx.login(login_user, login_password) + except Exception, e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + host = Host(module, zbx) + + template_ids = [] + if link_templates: + template_ids = host.get_template_ids(link_templates) + + group_ids = [] + + if host_groups: + group_ids = host.get_group_ids_by_group_names(host_groups) + + ip = "" + if interfaces: + for interface in interfaces: + if interface['type'] == 1: + ip = interface['ip'] + + proxy_id = "0" + + if proxy: + proxy_id = host.get_proxyid_by_proxy_name(proxy) + + # check if host exist + is_host_exist = host.is_host_exist(host_name) + + if is_host_exist: + # get host id by host name + zabbix_host_obj = host.get_host_by_host_name(host_name) + host_id = zabbix_host_obj['hostid'] + + if state == "absent": + # remove host + host.delete_host(host_id, host_name) + module.exit_json(changed=True, result="Successfully delete host %s" % host_name) + else: + if not group_ids: + 
module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name) + + # get exist host's interfaces + exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id}) + exist_interfaces_copy = copy.deepcopy(exist_interfaces) + + # update host + interfaces_len = len(interfaces) if interfaces else 0 + + if len(exist_interfaces) > interfaces_len: + if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids, + exist_interfaces, zabbix_host_obj, proxy_id): + host.link_or_clear_template(host_id, template_ids) + host.update_host(host_name, group_ids, status, host_id, + interfaces, exist_interfaces, proxy_id) + module.exit_json(changed=True, + result="Successfully update host %s (%s) and linked with template '%s'" + % (host_name, ip, link_templates)) + else: + module.exit_json(changed=False) + else: + if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids, + exist_interfaces_copy, zabbix_host_obj, proxy_id): + host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id) + host.link_or_clear_template(host_id, template_ids) + module.exit_json(changed=True, + result="Successfully update host %s (%s) and linked with template '%s'" + % (host_name, ip, link_templates)) + else: + module.exit_json(changed=False) + else: + if not group_ids: + module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name) + + if not interfaces or (interfaces and len(interfaces) == 0): + module.fail_json(msg="Specify at least one interface for creating host '%s'." 
% host_name) + + # create host + host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id) + host.link_or_clear_template(host_id, template_ids) + module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % ( + host_name, ip, link_templates)) + +from ansible.module_utils.basic import * +main() + diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py new file mode 100644 index 00000000000..e8d65370760 --- /dev/null +++ b/monitoring/zabbix_hostmacro.py @@ -0,0 +1,230 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013-2014, Epic Games, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: zabbix_hostmacro +short_description: Zabbix host macro creates/updates/deletes +description: + - manages Zabbix host macros, it can create, update or delete them. +version_added: "2.0" +author: + - "(@cave)" + - Dean Hailin Song +requirements: + - "python >= 2.6" + - zabbix-api +options: + server_url: + description: + - Url of Zabbix server, with protocol (http or https). + required: true + aliases: [ "url" ] + login_user: + description: + - Zabbix user name. + required: true + login_password: + description: + - Zabbix user password. + required: true + host_name: + description: + - Name of the host. + required: true + macro_name: + description: + - Name of the host macro. 
+ required: true + macro_value: + description: + - Value of the host macro. + required: true + state: + description: + - State of the macro. + - On C(present), it will create if macro does not exist or update the macro if the associated data is different. + - On C(absent) will remove a macro if it exists. + required: false + choices: ['present', 'absent'] + default: "present" + timeout: + description: + - The timeout of API request (seconds). + default: 10 +''' + +EXAMPLES = ''' +- name: Create a new host macro or update an existing macro's value + local_action: + module: zabbix_hostmacro + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + macro_name:Example macro + macro_value:Example value + state: present +''' + +import logging +import copy + +try: + from zabbix_api import ZabbixAPI, ZabbixAPISubClass + + HAS_ZABBIX_API = True +except ImportError: + HAS_ZABBIX_API = False + + +# Extend the ZabbixAPI +# Since the zabbix-api python module too old (version 1.0, no higher version so far). +class ZabbixAPIExtends(ZabbixAPI): + def __init__(self, server, timeout, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout) + + +class HostMacro(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # exist host + def is_host_exist(self, host_name): + result = self._zapi.host.exists({'host': host_name}) + return result + + # get host id by host name + def get_host_id(self, host_name): + try: + host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': host_name}}) + if len(host_list) < 1: + self._module.fail_json(msg="Host not found: %s" % host_name) + else: + host_id = host_list[0]['hostid'] + return host_id + except Exception, e: + self._module.fail_json(msg="Failed to get the host %s id: %s." 
% (host_name, e)) + + # get host macro + def get_host_macro(self, macro_name, host_id): + try: + host_macro_list = self._zapi.usermacro.get( + {"output": "extend", "selectSteps": "extend", 'hostids': [host_id], 'filter': {'macro': '{$' + macro_name + '}'}}) + if len(host_macro_list) > 0: + return host_macro_list[0] + return None + except Exception, e: + self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e)) + + # create host macro + def create_host_macro(self, macro_name, macro_value, host_id): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.usermacro.create({'hostid': host_id, 'macro': '{$' + macro_name + '}', 'value': macro_value}) + self._module.exit_json(changed=True, result="Successfully added host macro %s " % macro_name) + except Exception, e: + self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e)) + + # update host macro + def update_host_macro(self, host_macro_obj, macro_name, macro_value): + host_macro_id = host_macro_obj['hostmacroid'] + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value}) + self._module.exit_json(changed=True, result="Successfully updated host macro %s " % macro_name) + except Exception, e: + self._module.fail_json(msg="Failed to updated host macro %s: %s" % (macro_name, e)) + + # delete host macro + def delete_host_macro(self, host_macro_obj, macro_name): + host_macro_id = host_macro_obj['hostmacroid'] + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.usermacro.delete([host_macro_id]) + self._module.exit_json(changed=True, result="Successfully deleted host macro %s " % macro_name) + except Exception, e: + self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e)) + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(required=True, aliases=['url']), + 
login_user=dict(required=True), + login_password=dict(required=True, no_log=True), + host_name=dict(required=True), + macro_name=dict(required=True), + macro_value=dict(required=True), + state=dict(default="present", choices=['present', 'absent']), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + host_name = module.params['host_name'] + macro_name = (module.params['macro_name']).upper() + macro_value = module.params['macro_value'] + state = module.params['state'] + timeout = module.params['timeout'] + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx.login(login_user, login_password) + except Exception, e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + host_macro_class_obj = HostMacro(module, zbx) + + changed = False + + if host_name: + host_id = host_macro_class_obj.get_host_id(host_name) + host_macro_obj = host_macro_class_obj.get_host_macro(macro_name, host_id) + + if state == 'absent': + if not host_macro_obj: + module.exit_json(changed=False, msg="Host Macro %s does not exist" % macro_name) + else: + # delete a macro + host_macro_class_obj.delete_host_macro(host_macro_obj, macro_name) + else: + if not host_macro_obj: + # create host macro + host_macro_class_obj.create_host_macro(macro_name, macro_value, host_id) + else: + # update host macro + host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value) + +from ansible.module_utils.basic import * +main() + diff --git a/monitoring/zabbix_maintenance.py b/monitoring/zabbix_maintenance.py index 559f9e0e55a..2d611382919 100644 --- a/monitoring/zabbix_maintenance.py +++ b/monitoring/zabbix_maintenance.py @@ -26,9 +26,10 
@@ short_description: Create Zabbix maintenance windows description: - This module will let you create Zabbix maintenance windows. version_added: "1.8" -author: Alexander Bulimov +author: "Alexander Bulimov (@abulimov)" requirements: - - zabbix-api python module + - "python >= 2.6" + - zabbix-api options: state: description: @@ -47,12 +48,10 @@ options: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null host_names: description: - Hosts to manage maintenance window for. @@ -82,7 +81,6 @@ options: description: - Unique name of maintenance window. required: true - default: null desc: description: - Short description of maintenance window. @@ -272,9 +270,9 @@ def main(): host_names=dict(type='list', required=False, default=None, aliases=['host_name']), minutes=dict(type='int', required=False, default=10), host_groups=dict(type='list', required=False, default=None, aliases=['host_group']), - login_user=dict(required=True, default=None), - login_password=dict(required=True, default=None), - name=dict(required=True, default=None), + login_user=dict(required=True), + login_password=dict(required=True, no_log=True), + name=dict(required=True), desc=dict(required=False, default="Created by Ansible"), collect_data=dict(type='bool', required=False, default=True), ), diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py new file mode 100644 index 00000000000..12ef6c69b6f --- /dev/null +++ b/monitoring/zabbix_screen.py @@ -0,0 +1,419 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013-2014, Epic Games, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +DOCUMENTATION = ''' +--- +module: zabbix_screen +short_description: Zabbix screen creates/updates/deletes +description: + - This module allows you to create, modify and delete Zabbix screens and associated graph data. +version_added: "2.0" +author: + - "(@cove)" + - "Tony Minfei Ding" + - "Harrison Gu (@harrisongu)" +requirements: + - "python >= 2.6" + - zabbix-api +options: + server_url: + description: + - Url of Zabbix server, with protocol (http or https). + required: true + aliases: [ "url" ] + login_user: + description: + - Zabbix user name. + required: true + login_password: + description: + - Zabbix user password. + required: true + timeout: + description: + - The timeout of API request (seconds). + default: 10 + zabbix_screens: + description: + - List of screens to be created/updated/deleted(see example). + - If the screen(s) already been added, the screen(s) name won't be updated. + - When creating or updating screen(s), C(screen_name), C(host_group) are required. + - When deleting screen(s), the C(screen_name) is required. + - 'The available states are: C(present) (default) and C(absent). If the screen(s) already exists, and the state is not C(absent), the screen(s) will just be updated as needed.' + required: true +notes: + - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed. +''' + +EXAMPLES = ''' +# Create/update a screen. 
+- name: Create a new screen or update an existing screen's items + local_action: + module: zabbix_screen + server_url: http://monitor.example.com + login_user: username + login_password: password + screens: + - screen_name: ExampleScreen1 + host_group: Example group1 + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + +# Create/update multi-screen +- name: Create two of new screens or update the existing screens' items + local_action: + module: zabbix_screen + server_url: http://monitor.example.com + login_user: username + login_password: password + screens: + - screen_name: ExampleScreen1 + host_group: Example group1 + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + - screen_name: ExampleScreen2 + host_group: Example group2 + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + +# Limit the Zabbix screen creations to one host since Zabbix can return an error when doing concurent updates +- name: Create a new screen or update an existing screen's items + local_action: + module: zabbix_screen + server_url: http://monitor.example.com + login_user: username + login_password: password + state: present + screens: + - screen_name: ExampleScreen + host_group: Example group + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + when: inventory_hostname==groups['group_name'][0] +''' + +try: + from zabbix_api import ZabbixAPI, ZabbixAPISubClass + from zabbix_api import ZabbixAPIException + from zabbix_api import Already_Exists + HAS_ZABBIX_API = True +except ImportError: + HAS_ZABBIX_API = False + + +# Extend the ZabbixAPI +# Since the zabbix-api python module too old (version 1.0, and there's no higher version so far), it doesn't support the 'screenitem' api call, +# we have to inherit the ZabbixAPI class to add 'screenitem' support. 
+class ZabbixAPIExtends(ZabbixAPI): + screenitem = None + + def __init__(self, server, timeout, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout) + self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs)) + + +class Screen(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # get group id by group name + def get_host_group_id(self, group_name): + if group_name == "": + self._module.fail_json(msg="group_name is required") + hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}}) + if len(hostGroup_list) < 1: + self._module.fail_json(msg="Host group not found: %s" % group_name) + else: + hostGroup_id = hostGroup_list[0]['groupid'] + return hostGroup_id + + # get monitored host_id by host_group_id + def get_host_ids_by_group_id(self, group_id): + host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1}) + if len(host_list) < 1: + self._module.fail_json(msg="No host in the group.") + else: + host_ids = [] + for i in host_list: + host_id = i['hostid'] + host_ids.append(host_id) + return host_ids + + # get screen + def get_screen_id(self, screen_name): + if screen_name == "": + self._module.fail_json(msg="screen_name is required") + try: + screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}}) + if len(screen_id_list) >= 1: + screen_id = screen_id_list[0]['screenid'] + return screen_id + return None + except Exception as e: + self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e)) + + # create screen + def create_screen(self, screen_name, h_size, v_size): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size}) + return screen['screenids'][0] + except Exception as e: + self._module.fail_json(msg="Failed to create screen %s: %s" % 
(screen_name, e)) + + # update screen + def update_screen(self, screen_id, screen_name, h_size, v_size): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size}) + except Exception as e: + self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e)) + + # delete screen + def delete_screen(self, screen_id, screen_name): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.screen.delete([screen_id]) + except Exception as e: + self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e)) + + # get graph ids + def get_graph_ids(self, hosts, graph_name_list): + graph_id_lists = [] + vsize = 1 + for host in hosts: + graph_id_list = self.get_graphs_by_host_id(graph_name_list, host) + size = len(graph_id_list) + if size > 0: + graph_id_lists.extend(graph_id_list) + if vsize < size: + vsize = size + return graph_id_lists, vsize + + # getGraphs + def get_graphs_by_host_id(self, graph_name_list, host_id): + graph_ids = [] + for graph_name in graph_name_list: + graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id}) + graph_id_list = [] + if len(graphs_list) > 0: + for graph in graphs_list: + graph_id = graph['graphid'] + graph_id_list.append(graph_id) + if len(graph_id_list) > 0: + graph_ids.extend(graph_id_list) + return graph_ids + + # get screen items + def get_screen_items(self, screen_id): + screen_item_list = self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id}) + return screen_item_list + + # delete screen items + def delete_screen_items(self, screen_id, screen_item_id_list): + try: + if len(screen_item_id_list) == 0: + return True + screen_item_list = self.get_screen_items(screen_id) + if len(screen_item_list) > 0: + if self._module.check_mode: + self._module.exit_json(changed=True) + 
self._zapi.screenitem.delete(screen_item_id_list) + return True + return False + except ZabbixAPIException: + pass + + # get screen's hsize and vsize + def get_hsize_vsize(self, hosts, v_size): + h_size = len(hosts) + if h_size == 1: + if v_size == 1: + h_size = 1 + elif v_size in range(2, 9): + h_size = 2 + else: + h_size = 3 + v_size = (v_size - 1) / h_size + 1 + return h_size, v_size + + # create screen_items + def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size): + if len(hosts) < 4: + if width is None or width < 0: + width = 500 + else: + if width is None or width < 0: + width = 200 + if height is None or height < 0: + height = 100 + + try: + # when there're only one host, only one row is not good. + if len(hosts) == 1: + graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0]) + for i, graph_id in enumerate(graph_id_list): + if graph_id is not None: + self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id, + 'width': width, 'height': height, + 'x': i % h_size, 'y': i / h_size, 'colspan': 1, 'rowspan': 1, + 'elements': 0, 'valign': 0, 'halign': 0, + 'style': 0, 'dynamic': 0, 'sort_triggers': 0}) + else: + for i, host in enumerate(hosts): + graph_id_list = self.get_graphs_by_host_id(graph_name_list, host) + for j, graph_id in enumerate(graph_id_list): + if graph_id is not None: + self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id, + 'width': width, 'height': height, + 'x': i, 'y': j, 'colspan': 1, 'rowspan': 1, + 'elements': 0, 'valign': 0, 'halign': 0, + 'style': 0, 'dynamic': 0, 'sort_triggers': 0}) + except Already_Exists: + pass + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(required=True, aliases=['url']), + login_user=dict(required=True), + login_password=dict(required=True, no_log=True), + timeout=dict(type='int', default=10), + screens=dict(type='dict', required=True) + ), + 
supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)") + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + timeout = module.params['timeout'] + screens = module.params['screens'] + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx.login(login_user, login_password) + except Exception, e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + screen = Screen(module, zbx) + created_screens = [] + changed_screens = [] + deleted_screens = [] + + for zabbix_screen in screens: + screen_name = zabbix_screen['screen_name'] + screen_id = screen.get_screen_id(screen_name) + state = "absent" if "state" in zabbix_screen and zabbix_screen['state'] == "absent" else "present" + + if state == "absent": + if screen_id: + screen_item_list = screen.get_screen_items(screen_id) + screen_item_id_list = [] + for screen_item in screen_item_list: + screen_item_id = screen_item['screenitemid'] + screen_item_id_list.append(screen_item_id) + screen.delete_screen_items(screen_id, screen_item_id_list) + screen.delete_screen(screen_id, screen_name) + + deleted_screens.append(screen_name) + else: + host_group = zabbix_screen['host_group'] + graph_names = zabbix_screen['graph_names'] + graph_width = None + if 'graph_width' in zabbix_screen: + graph_width = zabbix_screen['graph_width'] + graph_height = None + if 'graph_height' in zabbix_screen: + graph_height = zabbix_screen['graph_height'] + host_group_id = screen.get_host_group_id(host_group) + hosts = screen.get_host_ids_by_group_id(host_group_id) + + screen_item_id_list = [] + resource_id_list = [] + + graph_ids, v_size = screen.get_graph_ids(hosts, graph_names) + h_size, v_size = screen.get_hsize_vsize(hosts, v_size) + + if not screen_id: + # create screen + screen_id = 
screen.create_screen(screen_name, h_size, v_size) + screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size) + created_screens.append(screen_name) + else: + screen_item_list = screen.get_screen_items(screen_id) + + for screen_item in screen_item_list: + screen_item_id = screen_item['screenitemid'] + resource_id = screen_item['resourceid'] + screen_item_id_list.append(screen_item_id) + resource_id_list.append(resource_id) + + # when the screen items changed, then update + if graph_ids != resource_id_list: + deleted = screen.delete_screen_items(screen_id, screen_item_id_list) + if deleted: + screen.update_screen(screen_id, screen_name, h_size, v_size) + screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size) + changed_screens.append(screen_name) + + if created_screens and changed_screens: + module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens), ",".join(changed_screens))) + elif created_screens: + module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens)) + elif changed_screens: + module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens)) + elif deleted_screens: + module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens)) + else: + module.exit_json(changed=False) + +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/network/a10/a10_server.py b/network/a10/a10_server.py index 65410536eef..2ad66c23588 100644 --- a/network/a10/a10_server.py +++ b/network/a10/a10_server.py @@ -28,7 +28,7 @@ version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices description: - Manage slb server objects on A10 Networks devices via aXAPI -author: Mischa Peters +author: "Mischa Peters (@mischapeters)" notes: - Requires A10 Networks 
aXAPI 2.1 options: @@ -183,28 +183,35 @@ def main(): json_post = { 'server': { - 'name': slb_server, - 'host': slb_server_ip, - 'status': axapi_enabled_disabled(slb_server_status), - 'port_list': slb_server_ports, + 'name': slb_server, } } + # add optional module parameters + if slb_server_ip: + json_post['server']['host'] = slb_server_ip + + if slb_server_ports: + json_post['server']['port_list'] = slb_server_ports + + if slb_server_status: + json_post['server']['status'] = axapi_enabled_disabled(slb_server_status) + slb_server_data = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server})) slb_server_exists = not axapi_failure(slb_server_data) changed = False if state == 'present': - if not slb_server_ip: - module.fail_json(msg='you must specify an IP address when creating a server') - if not slb_server_exists: + if not slb_server_ip: + module.fail_json(msg='you must specify an IP address when creating a server') + result = axapi_call(module, session_url + '&method=slb.server.create', json.dumps(json_post)) if axapi_failure(result): module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg']) changed = True else: - def needs_update(src_ports, dst_ports): + def port_needs_update(src_ports, dst_ports): ''' Checks to determine if the port definitions of the src_ports array are in or different from those in dst_ports. If there is @@ -227,12 +234,24 @@ def main(): # every port from the src exists in the dst, and none of them were different return False + def status_needs_update(current_status, new_status): + ''' + Check to determine if we want to change the status of a server. + If there is a difference between the current status of the server and + the desired status, return true, otherwise false. 
+ ''' + if current_status != new_status: + return True + return False + + defined_ports = slb_server_data.get('server', {}).get('port_list', []) + current_status = slb_server_data.get('server', {}).get('status') + - # we check for a needed update both ways, in case ports - # are missing from either the ones specified by the user - # or from those on the device - if needs_update(defined_ports, slb_server_ports) or needs_update(slb_server_ports, defined_ports): + # we check for a needed update several ways + # - in case ports are missing from the ones specified by the user + # - in case ports are missing from those on the device + # - in case we change the status of a server + if port_needs_update(defined_ports, slb_server_ports) or port_needs_update(slb_server_ports, defined_ports) or status_needs_update(current_status, axapi_enabled_disabled(slb_server_status)): result = axapi_call(module, session_url + '&method=slb.server.update', json.dumps(json_post)) if axapi_failure(result): module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg']) @@ -249,10 +268,10 @@ def main(): result = axapi_call(module, session_url + '&method=slb.server.delete', json.dumps({'name': slb_server})) changed = True else: - result = dict(msg="the server was not present") + result = dict(msg="the server was not present") - # if the config has changed, save the config unless otherwise requested - if changed and write_config: + # if the config has changed, or we want to force a save, save the config unless otherwise requested + if changed or write_config: write_result = axapi_call(module, session_url + '&method=system.action.write_memory') if axapi_failure(write_result): module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg']) diff --git a/network/a10/a10_service_group.py b/network/a10/a10_service_group.py index 3627e2d12b8..db1c21bc78e 100644 --- a/network/a10/a10_service_group.py +++ b/network/a10/a10_service_group.py 
@@ -28,7 +28,7 @@ version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices description: - Manage slb service-group objects on A10 Networks devices via aXAPI -author: Mischa Peters +author: "Mischa Peters (@mischapeters)" notes: - Requires A10 Networks aXAPI 2.1 - When a server doesn't exist and is added to the service-group the server will be created diff --git a/network/a10/a10_virtual_server.py b/network/a10/a10_virtual_server.py index 3d807c098cf..eb308a3032a 100644 --- a/network/a10/a10_virtual_server.py +++ b/network/a10/a10_virtual_server.py @@ -28,7 +28,7 @@ version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices description: - Manage slb virtual server objects on A10 Networks devices via aXAPI -author: Mischa Peters +author: "Mischa Peters (@mischapeters)" notes: - Requires A10 Networks aXAPI 2.1 requirements: diff --git a/network/citrix/netscaler.py b/network/citrix/netscaler.py index b2f87aa0d08..384a625bdca 100644 --- a/network/citrix/netscaler.py +++ b/network/citrix/netscaler.py @@ -81,8 +81,8 @@ options: default: 'yes' choices: ['yes', 'no'] -requirements: [ "urllib", "urllib2" ] -author: Nandor Sivok +requirements: [] +author: "Nandor Sivok (@dominis)" ''' EXAMPLES = ''' @@ -99,7 +99,7 @@ ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=api import base64 import socket - +import urllib class netscaler(object): diff --git a/network/dnsimple.py b/network/dnsimple.py old mode 100755 new mode 100644 index 363a2ca24c1..5cecfbd8169 --- a/network/dnsimple.py +++ b/network/dnsimple.py @@ -32,7 +32,7 @@ options: description: - Account API token. See I(account_email) for info. 
required: false - default: null + default: null domain: description: @@ -67,7 +67,7 @@ options: default: 3600 (one hour) value: - description: + description: - Record value - "Must be specified when trying to ensure a record exists" required: false @@ -93,7 +93,7 @@ options: default: null requirements: [ dnsimple ] -author: Alex Coomans +author: "Alex Coomans (@drcapulet)" ''' EXAMPLES = ''' @@ -133,9 +133,9 @@ import os try: from dnsimple import DNSimple from dnsimple.dnsimple import DNSimpleException + HAS_DNSIMPLE = True except ImportError: - print "failed=True msg='dnsimple required for this module'" - sys.exit(1) + HAS_DNSIMPLE = False def main(): module = AnsibleModule( @@ -148,7 +148,7 @@ def main(): type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']), ttl = dict(required=False, default=3600, type='int'), value = dict(required=False), - priority = dict(required=False, type='int'), + priority = dict(required=False, type='int'), state = dict(required=False, choices=['present', 'absent']), solo = dict(required=False, type='bool'), ), @@ -158,6 +158,9 @@ def main(): supports_check_mode = True, ) + if not HAS_DNSIMPLE: + module.fail_json("dnsimple required for this module") + account_email = module.params.get('account_email') account_api_token = module.params.get('account_api_token') domain = module.params.get('domain') diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index 148e25a5011..cce7bd10082 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -86,8 +86,8 @@ notes: - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP. - This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks. 
-requirements: [ urllib, urllib2, hashlib, hmac ] -author: Brice Burgess +requirements: [ hashlib, hmac ] +author: "Brice Burgess (@briceburg)" ''' EXAMPLES = ''' @@ -113,6 +113,8 @@ EXAMPLES = ''' # DNSMadeEasy module specific support methods. # +import urllib + IMPORT_ERROR = None try: import json @@ -134,6 +136,7 @@ class DME2: self.domain_map = None # ["domain_name"] => ID self.record_map = None # ["record_name"] => ID self.records = None # ["record_ID"] => + self.all_records = None # Lookup the domain ID if passed as a domain name vs. ID if not self.domain.isdigit(): @@ -191,11 +194,33 @@ class DME2: return self.records.get(record_id, False) - def getRecordByName(self, record_name): - if not self.record_map: - self._instMap('record') - - return self.getRecord(self.record_map.get(record_name, 0)) + # Try to find a single record matching this one. + # How we do this depends on the type of record. For instance, there + # can be several MX records for a single record_name while there can + # only be a single CNAME for a particular record_name. Note also that + # there can be several records with different types for a single name. 
+ def getMatchingRecord(self, record_name, record_type, record_value): + # Get all the records if not already cached + if not self.all_records: + self.all_records = self.getRecords() + + # TODO SRV type not yet implemented + if record_type in ["A", "AAAA", "CNAME", "HTTPRED", "PTR"]: + for result in self.all_records: + if result['name'] == record_name and result['type'] == record_type: + return result + return False + elif record_type in ["MX", "NS", "TXT"]: + for result in self.all_records: + if record_type == "MX": + value = record_value.split(" ")[1] + else: + value = record_value + if result['name'] == record_name and result['type'] == record_type and result['value'] == value: + return result + return False + else: + raise Exception('record_type not yet supported') def getRecords(self): return self.query(self.record_url, 'GET')['data'] @@ -262,9 +287,11 @@ def main(): "account_secret"], module.params["domain"], module) state = module.params["state"] record_name = module.params["record_name"] + record_type = module.params["record_type"] + record_value = module.params["record_value"] # Follow Keyword Controlled Behavior - if not record_name: + if record_name is None: domain_records = DME.getRecords() if not domain_records: module.fail_json( @@ -272,11 +299,15 @@ def main(): module.exit_json(changed=False, result=domain_records) # Fetch existing record + Build new one - current_record = DME.getRecordByName(record_name) + current_record = DME.getMatchingRecord(record_name, record_type, record_value) new_record = {'name': record_name} for i in ["record_value", "record_type", "record_ttl"]: - if module.params[i]: + if not module.params[i] is None: new_record[i[len("record_"):]] = module.params[i] + # Special handling for mx record + if new_record["type"] == "MX": + new_record["mxLevel"] = new_record["value"].split(" ")[0] + new_record["value"] = new_record["value"].split(" ")[1] # Compare new record against existing one changed = False @@ -292,7 +323,7 @@ def main(): 
if not "value" in new_record: if not current_record: module.fail_json( - msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, domain)) + msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain'])) module.exit_json(changed=False, result=current_record) # create record as it does not exist diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py old mode 100755 new mode 100644 index 2c333e58fba..1b106ba0a3e --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -25,7 +25,7 @@ short_description: "Collect facts from F5 BIG-IP devices" description: - "Collect facts from F5 BIG-IP devices via iControl SOAP API" version_added: "1.6" -author: Matt Hite +author: "Matt Hite (@mhite)" notes: - "Requires BIG-IP software version >= 11.4" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" @@ -56,6 +56,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 2.0 session: description: - BIG-IP session support; may be useful to avoid concurrency @@ -70,8 +78,8 @@ options: required: true default: null choices: ['address_class', 'certificate', 'client_ssl_profile', - 'device_group', 'interface', 'key', 'node', 'pool', 'rule', - 'self_ip', 'software', 'system_info', 'traffic_group', + 'device', 'device_group', 'interface', 'key', 'node', 'pool', + 'rule', 'self_ip', 'software', 'system_info', 'traffic_group', 'trunk', 'virtual_address', 'virtual_server', 'vlan'] aliases: [] filter: @@ -1566,6 +1574,12 @@ def generate_software_list(f5): software_list = software.get_all_software_status() return software_list +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. 
+ # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def main(): module = AnsibleModule( @@ -1573,6 +1587,7 @@ def main(): server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), + validate_certs = dict(default='yes', type='bool'), session = dict(type='bool', default=False), include = dict(type='list', required=True), filter = dict(type='str', required=False), @@ -1585,6 +1600,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] session = module.params['session'] fact_filter = module.params['filter'] if fact_filter: @@ -1593,14 +1609,17 @@ def main(): regex = None include = map(lambda x: x.lower(), module.params['include']) valid_includes = ('address_class', 'certificate', 'client_ssl_profile', - 'device_group', 'interface', 'key', 'node', 'pool', - 'rule', 'self_ip', 'software', 'system_info', + 'device', 'device_group', 'interface', 'key', 'node', + 'pool', 'rule', 'self_ip', 'software', 'system_info', 'traffic_group', 'trunk', 'virtual_address', 'virtual_server', 'vlan') include_test = map(lambda x: x in valid_includes, include) if not all(include_test): module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include))) + if not validate_certs: + disable_ssl_cert_validation() + try: facts = {} @@ -1665,6 +1684,8 @@ def main(): module.exit_json(**result) # include magic from lib/ansible/module_common.py -#<> -main() +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py index 62823f86579..ea24e995e27 100644 --- a/network/f5/bigip_monitor_http.py +++ b/network/f5/bigip_monitor_http.py @@ -27,7 +27,7 @@ short_description: "Manages F5 
BIG-IP LTM http monitors" description: - "Manages F5 BIG-IP LTM monitors via iControl SOAP API" version_added: "1.4" -author: Serge van Ginderachter +author: "Serge van Ginderachter (@srvg)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" @@ -51,6 +51,14 @@ options: - BIG-IP password required: true default: null + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 2.0 state: description: - Monitor state @@ -155,27 +163,10 @@ EXAMPLES = ''' name: "{{ monitorname }}" ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - TEMPLATE_TYPE = 'TTYPE_HTTP' DEFAULT_PARENT_TYPE = 'http' -# =========================================== -# bigip_monitor module generic methods. -# these should be re-useable for other monitor types -# - -def bigip_api(bigip, user, password): - - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - def check_monitor_exists(module, api, monitor, parent): @@ -262,7 +253,6 @@ def set_integer_property(api, monitor, int_property): def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): - changed = False for str_property in template_string_properties: if str_property['value'] is not None and not check_string_property(api, monitor, str_property): @@ -305,14 +295,8 @@ def set_ipport(api, monitor, ipport): def main(): # begin monitor specific stuff - - module = AnsibleModule( - argument_spec = dict( - server = dict(required=True), - user = dict(required=True), - password = dict(required=True), - partition = dict(default='Common'), - state = dict(default='present', choices=['present', 'absent']), + argument_spec=f5_argument_spec(); + argument_spec.update( 
dict( name = dict(required=True), parent = dict(default=DEFAULT_PARENT_TYPE), parent_partition = dict(default='Common'), @@ -324,19 +308,20 @@ def main(): interval = dict(required=False, type='int'), timeout = dict(required=False, type='int'), time_until_up = dict(required=False, type='int', default=0) - ), + ) + ) + + module = AnsibleModule( + argument_spec = argument_spec, supports_check_mode=True ) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - partition = module.params['partition'] + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) + parent_partition = module.params['parent_partition'] - state = module.params['state'] name = module.params['name'] - parent = "/%s/%s" % (parent_partition, module.params['parent']) - monitor = "/%s/%s" % (partition, name) + parent = fq_name(parent_partition, module.params['parent']) + monitor = fq_name(partition, name) send = module.params['send'] receive = module.params['receive'] receive_disable = module.params['receive_disable'] @@ -348,8 +333,6 @@ def main(): # end monitor specific stuff - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") api = bigip_api(server, user, password) monitor_exists = check_monitor_exists(module, api, monitor, parent) @@ -460,5 +443,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py index 8b89a0c6113..0900e95fd20 100644 --- a/network/f5/bigip_monitor_tcp.py +++ b/network/f5/bigip_monitor_tcp.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM tcp monitors" description: - "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API" version_added: "1.4" -author: Serge van Ginderachter +author: "Serge van Ginderachter (@srvg)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' 
required (see http://devcentral.f5.com)" @@ -49,6 +49,14 @@ options: - BIG-IP password required: true default: null + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 2.0 state: description: - Monitor state @@ -173,29 +181,11 @@ EXAMPLES = ''' ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP' TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open'] DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower() -# =========================================== -# bigip_monitor module generic methods. -# these should be re-useable for other monitor types -# - -def bigip_api(bigip, user, password): - - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - - def check_monitor_exists(module, api, monitor, parent): # hack to determine if monitor exists @@ -218,7 +208,7 @@ def check_monitor_exists(module, api, monitor, parent): def create_monitor(api, monitor, template_attributes): - try: + try: api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes]) except bigsuds.OperationFailed, e: if "already exists" in str(e): @@ -282,7 +272,6 @@ def set_integer_property(api, monitor, int_property): def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): - changed = False for str_property in template_string_properties: if str_property['value'] is not None and not check_string_property(api, monitor, str_property): @@ -325,14 +314,8 @@ def set_ipport(api, monitor, ipport): def main(): # begin monitor specific stuff - - module = AnsibleModule( - argument_spec = dict( - 
server = dict(required=True), - user = dict(required=True), - password = dict(required=True), - partition = dict(default='Common'), - state = dict(default='present', choices=['present', 'absent']), + argument_spec=f5_argument_spec(); + argument_spec.update(dict( name = dict(required=True), type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES), parent = dict(default=DEFAULT_PARENT), @@ -344,20 +327,21 @@ def main(): interval = dict(required=False, type='int'), timeout = dict(required=False, type='int'), time_until_up = dict(required=False, type='int', default=0) - ), + ) + ) + + module = AnsibleModule( + argument_spec = argument_spec, supports_check_mode=True ) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - partition = module.params['partition'] + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) + parent_partition = module.params['parent_partition'] - state = module.params['state'] name = module.params['name'] type = 'TTYPE_' + module.params['type'].upper() - parent = "/%s/%s" % (parent_partition, module.params['parent']) - monitor = "/%s/%s" % (partition, name) + parent = fq_name(parent_partition, module.params['parent']) + monitor = fq_name(partition, name) send = module.params['send'] receive = module.params['receive'] ip = module.params['ip'] @@ -372,8 +356,6 @@ def main(): # end monitor specific stuff - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") api = bigip_api(server, user, password) monitor_exists = check_monitor_exists(module, api, monitor, parent) @@ -485,5 +467,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index 68b6a2b52f1..28eacc0d6f5 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -25,7 +25,7 @@ short_description: "Manages 
F5 BIG-IP LTM nodes" description: - "Manages F5 BIG-IP LTM nodes via iControl SOAP API" version_added: "1.4" -author: Matt Hite +author: "Matt Hite (@mhite)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" @@ -54,6 +54,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 2.0 state: description: - Pool member state @@ -61,6 +69,22 @@ options: default: present choices: ['present', 'absent'] aliases: [] + session_state: + description: + - Set new session availability status for node + version_added: "1.9" + required: false + default: null + choices: ['enabled', 'disabled'] + aliases: [] + monitor_state: + description: + - Set monitor availability status for node + version_added: "1.9" + required: false + default: null + choices: ['enabled', 'disabled'] + aliases: [] partition: description: - Partition @@ -137,22 +161,32 @@ EXAMPLES = ''' partition=matthite name="{{ ansible_default_ipv4["address"] }}" -''' - -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# ========================== -# bigip_node module specific +# The BIG-IP GUI doesn't map directly to the API calls for "Node -> +# General Properties -> State". The following states map to API monitor +# and session states. 
+# +# Enabled (all traffic allowed): +# monitor_state=enabled, session_state=enabled +# Disabled (only persistent or active connections allowed): +# monitor_state=enabled, session_state=disabled +# Forced offline (only active connections allowed): +# monitor_state=disabled, session_state=disabled # +# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api + - name: Force node offline + local_action: > + bigip_node + server=lb.mydomain.com + user=admin + password=mysecret + state=present + session_state=disabled + monitor_state=disabled + partition=matthite + name="{{ ansible_default_ipv4["address"] }}" + +''' def node_exists(api, address): # hack to determine if node exists @@ -201,37 +235,55 @@ def delete_node_address(api, address): def set_node_description(api, name, description): api.LocalLB.NodeAddressV2.set_description(nodes=[name], - descriptions=[description]) + descriptions=[description]) def get_node_description(api, name): return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0] +def set_node_session_enabled_state(api, name, session_state): + session_state = "STATE_%s" % session_state.strip().upper() + api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name], + states=[session_state]) + +def get_node_session_status(api, name): + result = api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0] + result = result.split("SESSION_STATUS_")[-1].lower() + return result + +def set_node_monitor_state(api, name, monitor_state): + monitor_state = "STATE_%s" % monitor_state.strip().upper() + api.LocalLB.NodeAddressV2.set_monitor_state(nodes=[name], + states=[monitor_state]) + +def get_node_monitor_status(api, name): + result = api.LocalLB.NodeAddressV2.get_monitor_status(nodes=[name])[0] + result = result.split("MONITOR_STATUS_")[-1].lower() + return result + + def main(): - module = 
AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - state = dict(type='str', default='present', choices=['present', 'absent']), - partition = dict(type='str', default='Common'), + argument_spec=f5_argument_spec(); + argument_spec.update(dict( + session_state = dict(type='str', choices=['enabled', 'disabled']), + monitor_state = dict(type='str', choices=['enabled', 'disabled']), name = dict(type='str', required=True), host = dict(type='str', aliases=['address', 'ip']), description = dict(type='str') - ), + ) + ) + + module = AnsibleModule( + argument_spec = argument_spec, supports_check_mode=True ) - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - partition = module.params['partition'] + session_state = module.params['session_state'] + monitor_state = module.params['monitor_state'] host = module.params['host'] name = module.params['name'] - address = "/%s/%s" % (partition, name) + address = fq_name(partition, name) description = module.params['description'] if state == 'absent' and host is not None: @@ -264,6 +316,13 @@ def main(): module.fail_json(msg="unable to create: %s" % desc) else: result = {'changed': True} + if session_state is not None: + set_node_session_enabled_state(api, address, + session_state) + result = {'changed': True} + if monitor_state is not None: + set_node_monitor_state(api, address, monitor_state) + result = {'changed': True} if description is not None: set_node_description(api, address, description) result = {'changed': True} @@ -277,6 +336,34 @@ def main(): module.fail_json(msg="Changing the node address is " \ "not supported by the API; " \ "delete and recreate 
the node.") + if session_state is not None: + session_status = get_node_session_status(api, address) + if session_state == 'enabled' and \ + session_status == 'forced_disabled': + if not module.check_mode: + set_node_session_enabled_state(api, address, + session_state) + result = {'changed': True} + elif session_state == 'disabled' and \ + session_status != 'force_disabled': + if not module.check_mode: + set_node_session_enabled_state(api, address, + session_state) + result = {'changed': True} + if monitor_state is not None: + monitor_status = get_node_monitor_status(api, address) + if monitor_state == 'enabled' and \ + monitor_status == 'forced_down': + if not module.check_mode: + set_node_monitor_state(api, address, + monitor_state) + result = {'changed': True} + elif monitor_state == 'disabled' and \ + monitor_status != 'forced_down': + if not module.check_mode: + set_node_monitor_state(api, address, + monitor_state) + result = {'changed': True} if description is not None: if get_node_description(api, address) != description: if not module.check_mode: @@ -290,5 +377,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py index 48d03b9f1cb..1628f6c68c9 100644 --- a/network/f5/bigip_pool.py +++ b/network/f5/bigip_pool.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM pools" description: - "Manages F5 BIG-IP LTM pools via iControl SOAP API" version_added: "1.2" -author: Matt Hite +author: "Matt Hite (@mhite)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" @@ -54,6 +54,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 2.0 state: description: - Pool/pool member state @@ -220,21 +228,6 @@ EXAMPLES = ''' ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# =========================================== -# bigip_pool module specific support methods. -# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - def pool_exists(api, pool): # hack to determine if pool exists result = False @@ -354,14 +347,9 @@ def main(): service_down_choices = ['none', 'reset', 'drop', 'reselect'] - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - state = dict(type='str', default='present', choices=['present', 'absent']), + argument_spec=f5_argument_spec(); + argument_spec.update(dict( name = dict(type='str', required=True, aliases=['pool']), - partition = dict(type='str', default='Common'), lb_method = dict(type='str', choices=lb_method_choices), monitor_type = dict(type='str', choices=monitor_type_choices), quorum = dict(type='int'), @@ -370,20 +358,18 @@ def main(): service_down_action = dict(type='str', choices=service_down_choices), host = dict(type='str', aliases=['address']), port = dict(type='int') - ), + ) + ) + + module = AnsibleModule( + argument_spec = argument_spec, supports_check_mode=True ) - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] name = module.params['name'] - partition = module.params['partition'] - pool = "/%s/%s" % (partition, name) + pool = fq_name(partition,name) lb_method = 
module.params['lb_method'] if lb_method: lb_method = lb_method.lower() @@ -395,18 +381,18 @@ def main(): if monitors: monitors = [] for monitor in module.params['monitors']: - if "/" not in monitor: - monitors.append("/%s/%s" % (partition, monitor)) - else: - monitors.append(monitor) + monitors.append(fq_name(partition, monitor)) slow_ramp_time = module.params['slow_ramp_time'] service_down_action = module.params['service_down_action'] if service_down_action: service_down_action = service_down_action.lower() host = module.params['host'] - address = "/%s/%s" % (partition, host) + address = fq_name(partition,host) port = module.params['port'] + if not validate_certs: + disable_ssl_cert_validation() + # sanity check user supplied values if (host and not port) or (port and not host): @@ -532,5 +518,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index 5aef9f0ae98..ec2b7135372 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM pool members" description: - "Manages F5 BIG-IP LTM pool members via iControl SOAP API" version_added: "1.4" -author: Matt Hite +author: "Matt Hite (@mhite)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" @@ -39,86 +39,80 @@ options: description: - BIG-IP host required: true - default: null - choices: [] - aliases: [] user: description: - BIG-IP username required: true - default: null - choices: [] - aliases: [] password: description: - BIG-IP password required: true - default: null - choices: [] - aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 2.0 state: description: - Pool member state required: true default: present choices: ['present', 'absent'] - aliases: [] + session_state: + description: + - Set new session availability status for pool member + version_added: "2.0" + required: false + default: null + choices: ['enabled', 'disabled'] + monitor_state: + description: + - Set monitor availability status for pool member + version_added: "2.0" + required: false + default: null + choices: ['enabled', 'disabled'] pool: description: - Pool name. This pool must exist. required: true - default: null - choices: [] - aliases: [] partition: description: - Partition required: false default: 'Common' - choices: [] - aliases: [] host: description: - Pool member IP required: true - default: null - choices: [] aliases: ['address', 'name'] port: description: - Pool member port required: true - default: null - choices: [] - aliases: [] connection_limit: description: - Pool member connection limit. Setting this to 0 disables the limit. required: false default: null - choices: [] - aliases: [] description: description: - Pool member description required: false default: null - choices: [] - aliases: [] rate_limit: description: - Pool member rate limit (connections-per-second). Setting this to 0 disables the limit. required: false default: null - choices: [] - aliases: [] ratio: description: - Pool member ratio weight. Valid values range from 1 through 100. New pool members -- unless overriden with this value -- default to 1. required: false default: null - choices: [] - aliases: [] ''' EXAMPLES = ''' @@ -172,22 +166,35 @@ EXAMPLES = ''' host="{{ ansible_default_ipv4["address"] }}" port=80 -''' - -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True -# =========================================== -# bigip_pool_member module specific support methods. 
-# + # The BIG-IP GUI doesn't map directly to the API calls for "Pool -> + # Members -> State". The following states map to API monitor + # and session states. + # + # Enabled (all traffic allowed): + # monitor_state=enabled, session_state=enabled + # Disabled (only persistent or active connections allowed): + # monitor_state=enabled, session_state=disabled + # Forced offline (only active connections allowed): + # monitor_state=disabled, session_state=disabled + # + # See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down + + - name: Force pool member offline + local_action: > + bigip_pool_member + server=lb.mydomain.com + user=admin + password=mysecret + state=present + session_state=disabled + monitor_state=disabled + pool=matthite-pool + partition=matthite + host="{{ ansible_default_ipv4["address"] }}" + port=80 -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api +''' def pool_exists(api, pool): # hack to determine if pool exists @@ -276,42 +283,61 @@ def set_ratio(api, pool, address, port, ratio): members = [{'address': address, 'port': port}] api.LocalLB.Pool.set_member_ratio(pool_names=[pool], members=[members], ratios=[[ratio]]) +def set_member_session_enabled_state(api, pool, address, port, session_state): + members = [{'address': address, 'port': port}] + session_state = ["STATE_%s" % session_state.strip().upper()] + api.LocalLB.Pool.set_member_session_enabled_state(pool_names=[pool], members=[members], session_states=[session_state]) + +def get_member_session_status(api, pool, address, port): + members = [{'address': address, 'port': port}] + result = api.LocalLB.Pool.get_member_session_status(pool_names=[pool], members=[members])[0][0] + result = result.split("SESSION_STATUS_")[-1].lower() + return result + +def set_member_monitor_state(api, pool, address, port, monitor_state): + members = [{'address': address, 'port': port}] + monitor_state = ["STATE_%s" % 
monitor_state.strip().upper()] + api.LocalLB.Pool.set_member_monitor_state(pool_names=[pool], members=[members], monitor_states=[monitor_state]) + +def get_member_monitor_status(api, pool, address, port): + members = [{'address': address, 'port': port}] + result = api.LocalLB.Pool.get_member_monitor_status(pool_names=[pool], members=[members])[0][0] + result = result.split("MONITOR_STATUS_")[-1].lower() + return result + def main(): - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - state = dict(type='str', default='present', choices=['present', 'absent']), + argument_spec = f5_argument_spec(); + argument_spec.update(dict( + session_state = dict(type='str', choices=['enabled', 'disabled']), + monitor_state = dict(type='str', choices=['enabled', 'disabled']), pool = dict(type='str', required=True), - partition = dict(type='str', default='Common'), host = dict(type='str', required=True, aliases=['address', 'name']), port = dict(type='int', required=True), connection_limit = dict(type='int'), description = dict(type='str'), rate_limit = dict(type='int'), ratio = dict(type='int') - ), - supports_check_mode=True + ) ) - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") + module = AnsibleModule( + argument_spec = argument_spec, + supports_check_mode=True + ) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - partition = module.params['partition'] - pool = "/%s/%s" % (partition, module.params['pool']) + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) + session_state = module.params['session_state'] + monitor_state = module.params['monitor_state'] + pool = fq_name(partition, module.params['pool']) connection_limit = module.params['connection_limit'] description = 
module.params['description'] rate_limit = module.params['rate_limit'] ratio = module.params['ratio'] host = module.params['host'] - address = "/%s/%s" % (partition, host) + address = fq_name(partition, host) port = module.params['port'] + # sanity check user supplied values if (host and not port) or (port and not host): @@ -347,6 +373,10 @@ def main(): set_rate_limit(api, pool, address, port, rate_limit) if ratio is not None: set_ratio(api, pool, address, port, ratio) + if session_state is not None: + set_member_session_enabled_state(api, pool, address, port, session_state) + if monitor_state is not None: + set_member_monitor_state(api, pool, address, port, monitor_state) result = {'changed': True} else: # pool member exists -- potentially modify attributes @@ -366,6 +396,26 @@ def main(): if not module.check_mode: set_ratio(api, pool, address, port, ratio) result = {'changed': True} + if session_state is not None: + session_status = get_member_session_status(api, pool, address, port) + if session_state == 'enabled' and session_status == 'forced_disabled': + if not module.check_mode: + set_member_session_enabled_state(api, pool, address, port, session_state) + result = {'changed': True} + elif session_state == 'disabled' and session_status != 'force_disabled': + if not module.check_mode: + set_member_session_enabled_state(api, pool, address, port, session_state) + result = {'changed': True} + if monitor_state is not None: + monitor_status = get_member_monitor_status(api, pool, address, port) + if monitor_state == 'enabled' and monitor_status == 'forced_down': + if not module.check_mode: + set_member_monitor_state(api, pool, address, port, monitor_state) + result = {'changed': True} + elif monitor_state == 'disabled' and monitor_status != 'forced_down': + if not module.check_mode: + set_member_monitor_state(api, pool, address, port, monitor_state) + result = {'changed': True} except Exception, e: module.fail_json(msg="received exception: %s" % e) @@ -374,5 +424,6 @@ 
def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/haproxy.py b/network/haproxy.py index 38757599df5..6d4f6a4279a 100644 --- a/network/haproxy.py +++ b/network/haproxy.py @@ -68,6 +68,24 @@ options: - When disabling server, immediately terminate all the sessions attached to the specified server. This can be used to terminate long-running sessions after a server is put into maintenance mode, for instance. required: false default: false + wait: + description: + - Wait until the server reports a status of 'UP' when state=enabled, or status of 'MAINT' when state=disabled + required: false + default: false + version_added: "2.0" + wait_retries: + description: + - number of times to check for status after changing the state + required: false + default: 25 + version_added: "2.0" + wait_interval: + description: + - number of seconds to wait between retries + required: false + default: 5 + version_added: "2.0" ''' EXAMPLES = ''' @@ -82,24 +100,37 @@ examples: # disable server, provide socket file - haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www +# disable server, provide socket file, wait until status reports in maintenance +- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www wait=yes + # disable backend server in 'www' backend pool and drop open sessions to it - haproxy: state=disabled host={{ inventory_hostname }} backend=www socket=/var/run/haproxy.sock shutdown_sessions=true # enable server in 'www' backend pool - haproxy: state=enabled host={{ inventory_hostname }} backend=www +# enable server in 'www' backend pool wait until healthy +- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes + +# enable server in 'www' backend pool wait until healthy. 
Retry 10 times with intervals of 5 seconds to retrieve the health +- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes wait_retries=10 wait_interval=5 + # enable server in 'www' backend pool with change server(s) weight - haproxy: state=enabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock weight=10 backend=www -author: Ravi Bhure +author: "Ravi Bhure (@ravibhure)" ''' import socket +import csv +import time DEFAULT_SOCKET_LOCATION="/var/run/haproxy.sock" RECV_SIZE = 1024 ACTION_CHOICES = ['enabled', 'disabled'] +WAIT_RETRIES=25 +WAIT_INTERVAL=5 ###################################################################### class TimeoutException(Exception): @@ -126,10 +157,12 @@ class HAProxy(object): self.weight = self.module.params['weight'] self.socket = self.module.params['socket'] self.shutdown_sessions = self.module.params['shutdown_sessions'] - + self.wait = self.module.params['wait'] + self.wait_retries = self.module.params['wait_retries'] + self.wait_interval = self.module.params['wait_interval'] self.command_results = [] - def execute(self, cmd, timeout=200): + def execute(self, cmd, timeout=200, capture_output=True): """ Executes a HAProxy command by sending a message to a HAProxy's local UNIX socket and waiting up to 'timeout' milliseconds for the response. @@ -144,10 +177,35 @@ class HAProxy(object): while buf: result += buf buf = self.client.recv(RECV_SIZE) - self.command_results = result.strip() + if capture_output: + self.command_results = result.strip() self.client.close() return result + def wait_until_status(self, pxname, svname, status): + """ + Wait for a service to reach the specified status. Try RETRIES times + with INTERVAL seconds of sleep in between. If the service has not reached + the expected status in that time, the module will fail. If the service was + not found, the module will fail. 
+ """ + for i in range(1, self.wait_retries): + data = self.execute('show stat', 200, False).lstrip('# ') + r = csv.DictReader(data.splitlines()) + found = False + for row in r: + if row['pxname'] == pxname and row['svname'] == svname: + found = True + if row['status'] == status: + return True; + else: + time.sleep(self.wait_interval) + + if not found: + self.module.fail_json(msg="unable to find server %s/%s" % (pxname, svname)) + + self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." % (pxname, svname, status, self.wait_retries)) + def enabled(self, host, backend, weight): """ Enabled action, marks server to UP and checks are re-enabled, @@ -170,6 +228,8 @@ class HAProxy(object): if weight: cmd += "; set weight %s/%s %s" % (pxname, svname, weight) self.execute(cmd) + if self.wait: + self.wait_until_status(pxname, svname, 'UP') else: pxname = backend @@ -177,6 +237,8 @@ class HAProxy(object): if weight: cmd += "; set weight %s/%s %s" % (pxname, svname, weight) self.execute(cmd) + if self.wait: + self.wait_until_status(pxname, svname, 'UP') def disabled(self, host, backend, shutdown_sessions): """ @@ -200,6 +262,8 @@ class HAProxy(object): if shutdown_sessions: cmd += "; shutdown sessions server %s/%s" % (pxname, svname) self.execute(cmd) + if self.wait: + self.wait_until_status(pxname, svname, 'MAINT') else: pxname = backend @@ -207,6 +271,8 @@ class HAProxy(object): if shutdown_sessions: cmd += "; shutdown sessions server %s/%s" % (pxname, svname) self.execute(cmd) + if self.wait: + self.wait_until_status(pxname, svname, 'MAINT') def act(self): """ @@ -236,6 +302,9 @@ def main(): weight=dict(required=False, default=None), socket = dict(required=False, default=DEFAULT_SOCKET_LOCATION), shutdown_sessions=dict(required=False, default=False), + wait=dict(required=False, default=False, type='bool'), + wait_retries=dict(required=False, default=WAIT_RETRIES, type='int'), + wait_interval=dict(required=False, default=WAIT_INTERVAL, 
type='int'), ), ) diff --git a/network/lldp.py b/network/lldp.py old mode 100755 new mode 100644 index d30fa5d9a60..3ed554f79c3 --- a/network/lldp.py +++ b/network/lldp.py @@ -24,7 +24,7 @@ short_description: get details reported by lldp description: - Reads data out of lldpctl options: {} -author: Andy Hill +author: "Andy Hill (@andyhky)" notes: - Requires lldpd running and lldp enabled on switches ''' @@ -58,6 +58,8 @@ def gather_lldp(): path, value = entry.strip().split("=", 1) path = path.split(".") path_components, final = path[:-1], path[-1] + else: + value = current_dict[final] + '\n' + entry current_dict = output_dict for path_component in path_components: diff --git a/network/nmcli.py b/network/nmcli.py new file mode 100644 index 00000000000..c674114a32e --- /dev/null +++ b/network/nmcli.py @@ -0,0 +1,1070 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Chris Long +# +# This file is a module for Ansible that interacts with Network Manager +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION=''' +--- +module: nmcli +author: "Chris Long (@alcamie101)" +short_description: Manage Networking +requirements: [ nmcli, dbus ] +version_added: "2.0" +description: + - Manage the network devices. Create, modify, and manage, ethernet, teams, bonds, vlans etc. 
+options: + state: + required: True + choices: [ present, absent ] + description: + - Whether the device should exist or not, taking action if the state is different from what is stated. + autoconnect: + required: False + default: "yes" + choices: [ "yes", "no" ] + description: + - Whether the connection should start on boot. + - Whether the connection profile can be automatically activated + conn_name: + required: True + description: + - 'Where conn_name will be the name used to call the connection. when not provided a default name is generated: [-][-]' + ifname: + required: False + default: conn_name + description: + - Where IFNAME will be the what we call the interface name. + - interface to bind the connection to. The connection will only be applicable to this interface name. + - A special value of "*" can be used for interface-independent connections. + - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan. + type: + required: False + choices: [ ethernet, team, team-slave, bond, bond-slave, bridge, vlan ] + description: + - This is the type of device or network connection that you wish to create. + mode: + required: False + choices: [ "balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb" ] + default: balence-rr + description: + - This is the type of device or network connection that you wish to create for a bond, team or bridge. 
+ master: + required: False + default: None + description: + - master ] STP forwarding delay, in seconds + hellotime: + required: False + default: 2 + description: + - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds + maxage: + required: False + default: 20 + description: + - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds + ageingtime: + required: False + default: 300 + description: + - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds + mac: + required: False + default: None + description: + - 'This is only used with bridge - MAC address of the bridge (note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel)' + slavepriority: + required: False + default: 32 + description: + - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave + path_cost: + required: False + default: 100 + description: + - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave + hairpin: + required: False + default: yes + description: + - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame was received on. 
+ vlanid: + required: False + default: None + description: + - This is only used with VLAN - VLAN ID in range <0-4095> + vlandev: + required: False + default: None + description: + - This is only used with VLAN - parent device this VLAN is on, can use ifname + flags: + required: False + default: None + description: + - This is only used with VLAN - flags + ingress: + required: False + default: None + description: + - This is only used with VLAN - VLAN ingress priority mapping + egress: + required: False + default: None + description: + - This is only used with VLAN - VLAN egress priority mapping + +''' + +EXAMPLES=''' +The following examples are working examples that I have run in the field. I followed follow the structure: +``` +|_/inventory/cloud-hosts +| /group_vars/openstack-stage.yml +| /host_vars/controller-01.openstack.host.com +| /host_vars/controller-02.openstack.host.com +|_/playbook/library/nmcli.py +| /playbook-add.yml +| /playbook-del.yml +``` + +## inventory examples +### groups_vars +```yml +--- +#devops_os_define_network +storage_gw: "192.168.0.254" +external_gw: "10.10.0.254" +tenant_gw: "172.100.0.254" + +#Team vars +nmcli_team: + - {conn_name: 'tenant', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"} + - {conn_name: 'external', ip4: "{{external_ip}}", gw4: "{{external_gw}}"} + - {conn_name: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"} +nmcli_team_slave: + - {conn_name: 'em1', ifname: 'em1', master: 'tenant'} + - {conn_name: 'em2', ifname: 'em2', master: 'tenant'} + - {conn_name: 'p2p1', ifname: 'p2p1', master: 'storage'} + - {conn_name: 'p2p2', ifname: 'p2p2', master: 'external'} + +#bond vars +nmcli_bond: + - {conn_name: 'tenant', ip4: "{{tenant_ip}}", gw4: '', mode: 'balance-rr'} + - {conn_name: 'external', ip4: "{{external_ip}}", gw4: '', mode: 'balance-rr'} + - {conn_name: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}", mode: 'balance-rr'} +nmcli_bond_slave: + - {conn_name: 'em1', ifname: 'em1', master: 'tenant'} + - 
{conn_name: 'em2', ifname: 'em2', master: 'tenant'} + - {conn_name: 'p2p1', ifname: 'p2p1', master: 'storage'} + - {conn_name: 'p2p2', ifname: 'p2p2', master: 'external'} + +#ethernet vars +nmcli_ethernet: + - {conn_name: 'em1', ifname: 'em1', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"} + - {conn_name: 'em2', ifname: 'em2', ip4: "{{tenant_ip1}}", gw4: "{{tenant_gw}}"} + - {conn_name: 'p2p1', ifname: 'p2p1', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"} + - {conn_name: 'p2p2', ifname: 'p2p2', ip4: "{{external_ip}}", gw4: "{{external_gw}}"} +``` + +### host_vars +```yml +--- +storage_ip: "192.168.160.21/23" +external_ip: "10.10.152.21/21" +tenant_ip: "192.168.200.21/23" +``` + + + +## playbook-add.yml example + +```yml +--- +- hosts: openstack-stage + remote_user: root + tasks: + +- name: install needed network manager libs + yum: name={{ item }} state=installed + with_items: + - libnm-qt-devel.x86_64 + - nm-connection-editor.x86_64 + - libsemanage-python + - policycoreutils-python + +##### Working with all cloud nodes - Teaming + - name: try nmcli add team - conn_name only & ip4 gw4 + nmcli: type=team conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} state=present + with_items: + - "{{nmcli_team}}" + + - name: try nmcli add teams-slave + nmcli: type=team-slave conn_name={{item.conn_name}} ifname={{item.ifname}} master={{item.master}} state=present + with_items: + - "{{nmcli_team_slave}}" + +###### Working with all cloud nodes - Bonding +# - name: try nmcli add bond - conn_name only & ip4 gw4 mode +# nmcli: type=bond conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} mode={{item.mode}} state=present +# with_items: +# - "{{nmcli_bond}}" +# +# - name: try nmcli add bond-slave +# nmcli: type=bond-slave conn_name={{item.conn_name}} ifname={{item.ifname}} master={{item.master}} state=present +# with_items: +# - "{{nmcli_bond_slave}}" + +##### Working with all cloud nodes - Ethernet +# - name: nmcli add Ethernet - conn_name only & ip4 gw4 +# nmcli: 
type=ethernet conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} state=present +# with_items: +# - "{{nmcli_ethernet}}" +``` + +## playbook-del.yml example + +```yml +--- +- hosts: openstack-stage + remote_user: root + tasks: + + - name: try nmcli del team - multiple + nmcli: conn_name={{item.conn_name}} state=absent + with_items: + - { conn_name: 'em1'} + - { conn_name: 'em2'} + - { conn_name: 'p1p1'} + - { conn_name: 'p1p2'} + - { conn_name: 'p2p1'} + - { conn_name: 'p2p2'} + - { conn_name: 'tenant'} + - { conn_name: 'storage'} + - { conn_name: 'external'} + - { conn_name: 'team-em1'} + - { conn_name: 'team-em2'} + - { conn_name: 'team-p1p1'} + - { conn_name: 'team-p1p2'} + - { conn_name: 'team-p2p1'} + - { conn_name: 'team-p2p2'} +``` +# To add an Ethernet connection with static IP configuration, issue a command as follows +- nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present + +# To add an Team connection with static IP configuration, issue a command as follows +- nmcli: conn_name=my-team1 ifname=my-team1 type=team ip4=192.168.100.100/24 gw4=192.168.100.1 state=present autoconnect=yes + +# Optionally, at the same time specify IPv6 addresses for the device as follows: +- nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 ip6=abbe::cafe gw6=2001:db8::1 state=present + +# To add two IPv4 DNS server addresses: +-nmcli: conn_name=my-eth1 dns4=["8.8.8.8", "8.8.4.4"] state=present + +# To make a profile usable for all compatible Ethernet interfaces, issue a command as follows +- nmcli: ctype=ethernet name=my-eth1 ifname="*" state=present + +# To change the property of a setting e.g. MTU, issue a command as follows: +- nmcli: conn_name=my-eth1 mtu=9000 state=present + + Exit Status's: + - nmcli exits with status 0 if it succeeds, a value greater than 0 is + returned if an error occurs. 
+ - 0 Success - indicates the operation succeeded + - 1 Unknown or unspecified error + - 2 Invalid user input, wrong nmcli invocation + - 3 Timeout expired (see --wait option) + - 4 Connection activation failed + - 5 Connection deactivation failed + - 6 Disconnecting device failed + - 7 Connection deletion failed + - 8 NetworkManager is not running + - 9 nmcli and NetworkManager versions mismatch + - 10 Connection, device, or access point does not exist. +''' +# import ansible.module_utils.basic +import os +import syslog +import sys +import dbus +from gi.repository import NetworkManager, NMClient + + +class Nmcli(object): + """ + This is the generic nmcli manipulation class that is subclassed based on platform. + A subclass may wish to override the following action methods:- + - create_connection() + - delete_connection() + - modify_connection() + - show_connection() + - up_connection() + - down_connection() + All subclasses MUST define platform and distribution (which may be None). + """ + + platform='Generic' + distribution=None + bus=dbus.SystemBus() + # The following is going to be used in dbus code + DEVTYPES={1: "Ethernet", + 2: "Wi-Fi", + 5: "Bluetooth", + 6: "OLPC", + 7: "WiMAX", + 8: "Modem", + 9: "InfiniBand", + 10: "Bond", + 11: "VLAN", + 12: "ADSL", + 13: "Bridge", + 14: "Generic", + 15: "Team" + } + STATES={0: "Unknown", + 10: "Unmanaged", + 20: "Unavailable", + 30: "Disconnected", + 40: "Prepare", + 50: "Config", + 60: "Need Auth", + 70: "IP Config", + 80: "IP Check", + 90: "Secondaries", + 100: "Activated", + 110: "Deactivating", + 120: "Failed" + } + + + def __init__(self, module): + self.module=module + self.state=module.params['state'] + self.autoconnect=module.params['autoconnect'] + self.conn_name=module.params['conn_name'] + self.master=module.params['master'] + self.ifname=module.params['ifname'] + self.type=module.params['type'] + self.ip4=module.params['ip4'] + self.gw4=module.params['gw4'] + self.dns4=module.params['dns4'] + 
self.ip6=module.params['ip6'] + self.gw6=module.params['gw6'] + self.dns6=module.params['dns6'] + self.mtu=module.params['mtu'] + self.stp=module.params['stp'] + self.priority=module.params['priority'] + self.mode=module.params['mode'] + self.miimon=module.params['miimon'] + self.downdelay=module.params['downdelay'] + self.updelay=module.params['updelay'] + self.arp_interval=module.params['arp_interval'] + self.arp_ip_target=module.params['arp_ip_target'] + self.slavepriority=module.params['slavepriority'] + self.forwarddelay=module.params['forwarddelay'] + self.hellotime=module.params['hellotime'] + self.maxage=module.params['maxage'] + self.ageingtime=module.params['ageingtime'] + self.mac=module.params['mac'] + self.vlanid=module.params['vlanid'] + self.vlandev=module.params['vlandev'] + self.flags=module.params['flags'] + self.ingress=module.params['ingress'] + self.egress=module.params['egress'] + # select whether we dump additional debug info through syslog + self.syslogging=True + + def execute_command(self, cmd, use_unsafe_shell=False, data=None): + if self.syslogging: + syslog.openlog('ansible-%s' % os.path.basename(__file__)) + syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) + + return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) + + def merge_secrets(self, proxy, config, setting_name): + try: + # returns a dict of dicts mapping name::setting, where setting is a dict + # mapping key::value. Each member of the 'setting' dict is a secret + secrets=proxy.GetSecrets(setting_name) + + # Copy the secrets into our connection config + for setting in secrets: + for key in secrets[setting]: + config[setting_name][key]=secrets[setting][key] + except Exception, e: + pass + + def dict_to_string(self, d): + # Try to trivially translate a dictionary's elements into nice string + # formatting. 
+ dstr="" + for key in d: + val=d[key] + str_val="" + add_string=True + if type(val)==type(dbus.Array([])): + for elt in val: + if type(elt)==type(dbus.Byte(1)): + str_val+="%s " % int(elt) + elif type(elt)==type(dbus.String("")): + str_val+="%s" % elt + elif type(val)==type(dbus.Dictionary({})): + dstr+=self.dict_to_string(val) + add_string=False + else: + str_val=val + if add_string: + dstr+="%s: %s\n" % ( key, str_val) + return dstr + + def connection_to_string(self, config): + # dump a connection configuration to use in list_connection_info + setting_list=[] + for setting_name in config: + setting_list.append(self.dict_to_string(config[setting_name])) + return setting_list + # print "" + + def list_connection_info(self): + # Ask the settings service for the list of connections it provides + bus=dbus.SystemBus() + + service_name="org.freedesktop.NetworkManager" + proxy=bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings") + settings=dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings") + connection_paths=settings.ListConnections() + connection_list=[] + # List each connection's name, UUID, and type + for path in connection_paths: + con_proxy=bus.get_object(service_name, path) + settings_connection=dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection") + config=settings_connection.GetSettings() + + # Now get secrets too; we grab the secrets for each type of connection + # (since there isn't a "get all secrets" call because most of the time + # you only need 'wifi' secrets or '802.1x' secrets, not everything) and + # merge that into the configuration data - To use at a later stage + self.merge_secrets(settings_connection, config, '802-11-wireless') + self.merge_secrets(settings_connection, config, '802-11-wireless-security') + self.merge_secrets(settings_connection, config, '802-1x') + self.merge_secrets(settings_connection, config, 'gsm') + self.merge_secrets(settings_connection, config, 'cdma') + 
self.merge_secrets(settings_connection, config, 'ppp') + + # Get the details of the 'connection' setting + s_con=config['connection'] + connection_list.append(s_con['id']) + connection_list.append(s_con['uuid']) + connection_list.append(s_con['type']) + connection_list.append(self.connection_to_string(config)) + return connection_list + + def connection_exists(self): + # we are going to use name and type in this instance to find if that connection exists and is of type x + connections=self.list_connection_info() + + for con_item in connections: + if self.conn_name==con_item: + return True + + def down_connection(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # if self.connection_exists(): + cmd.append('con') + cmd.append('down') + cmd.append(self.conn_name) + return self.execute_command(cmd) + + def up_connection(self): + cmd=[self.module.get_bin_path('nmcli', True)] + cmd.append('con') + cmd.append('up') + cmd.append(self.conn_name) + return self.execute_command(cmd) + + def create_connection_team(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating team interface + cmd.append('con') + cmd.append('add') + cmd.append('type') + cmd.append('team') + cmd.append('con-name') + if self.conn_name is not None: + cmd.append(self.conn_name) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.conn_name is not None: + cmd.append(self.conn_name) + if self.ip4 is not None: + cmd.append('ip4') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('gw4') + cmd.append(self.gw4) + if self.ip6 is not None: + cmd.append('ip6') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('gw6') + cmd.append(self.gw6) + if self.autoconnect is not None: + cmd.append('autoconnect') + cmd.append(self.autoconnect) + return cmd + + def modify_connection_team(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying team 
interface + cmd.append('con') + cmd.append('mod') + cmd.append(self.conn_name) + if self.ip4 is not None: + cmd.append('ipv4.address') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('ipv4.gateway') + cmd.append(self.gw4) + if self.dns4 is not None: + cmd.append('ipv4.dns') + cmd.append(self.dns4) + if self.ip6 is not None: + cmd.append('ipv6.address') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('ipv6.gateway') + cmd.append(self.gw4) + if self.dns6 is not None: + cmd.append('ipv6.dns') + cmd.append(self.dns6) + if self.autoconnect is not None: + cmd.append('autoconnect') + cmd.append(self.autoconnect) + # Can't use MTU with team + return cmd + + def create_connection_team_slave(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating team-slave interface + cmd.append('connection') + cmd.append('add') + cmd.append('type') + cmd.append(self.type) + cmd.append('con-name') + if self.conn_name is not None: + cmd.append(self.conn_name) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.conn_name is not None: + cmd.append(self.conn_name) + cmd.append('master') + if self.conn_name is not None: + cmd.append(self.master) + # if self.mtu is not None: + # cmd.append('802-3-ethernet.mtu') + # cmd.append(self.mtu) + return cmd + + def modify_connection_team_slave(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying team-slave interface + cmd.append('con') + cmd.append('mod') + cmd.append(self.conn_name) + cmd.append('connection.master') + cmd.append(self.master) + if self.mtu is not None: + cmd.append('802-3-ethernet.mtu') + cmd.append(self.mtu) + return cmd + + def create_connection_bond(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating bond interface + cmd.append('con') + cmd.append('add') + cmd.append('type') + cmd.append('bond') + cmd.append('con-name') + if 
self.conn_name is not None: + cmd.append(self.conn_name) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.conn_name is not None: + cmd.append(self.conn_name) + if self.ip4 is not None: + cmd.append('ip4') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('gw4') + cmd.append(self.gw4) + if self.ip6 is not None: + cmd.append('ip6') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('gw6') + cmd.append(self.gw6) + if self.autoconnect is not None: + cmd.append('autoconnect') + cmd.append(self.autoconnect) + if self.mode is not None: + cmd.append('mode') + cmd.append(self.mode) + if self.miimon is not None: + cmd.append('miimon') + cmd.append(self.miimon) + if self.downdelay is not None: + cmd.append('downdelay') + cmd.append(self.downdelay) + if self.downdelay is not None: + cmd.append('updelay') + cmd.append(self.updelay) + if self.downdelay is not None: + cmd.append('arp-interval') + cmd.append(self.arp_interval) + if self.downdelay is not None: + cmd.append('arp-ip-target') + cmd.append(self.arp_ip_target) + return cmd + + def modify_connection_bond(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying bond interface + cmd.append('con') + cmd.append('mod') + cmd.append(self.conn_name) + if self.ip4 is not None: + cmd.append('ipv4.address') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('ipv4.gateway') + cmd.append(self.gw4) + if self.dns4 is not None: + cmd.append('ipv4.dns') + cmd.append(self.dns4) + if self.ip6 is not None: + cmd.append('ipv6.address') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('ipv6.gateway') + cmd.append(self.gw4) + if self.dns6 is not None: + cmd.append('ipv6.dns') + cmd.append(self.dns6) + if self.autoconnect is not None: + cmd.append('autoconnect') + cmd.append(self.autoconnect) + return cmd + + def create_connection_bond_slave(self): + 
cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating bond-slave interface + cmd.append('connection') + cmd.append('add') + cmd.append('type') + cmd.append('bond-slave') + cmd.append('con-name') + if self.conn_name is not None: + cmd.append(self.conn_name) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.conn_name is not None: + cmd.append(self.conn_name) + cmd.append('master') + if self.conn_name is not None: + cmd.append(self.master) + return cmd + + def modify_connection_bond_slave(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying bond-slave interface + cmd.append('con') + cmd.append('mod') + cmd.append(self.conn_name) + cmd.append('connection.master') + cmd.append(self.master) + return cmd + + def create_connection_ethernet(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating ethernet interface + # To add an Ethernet connection with static IP configuration, issue a command as follows + # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present + # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1 + cmd.append('con') + cmd.append('add') + cmd.append('type') + cmd.append('ethernet') + cmd.append('con-name') + if self.conn_name is not None: + cmd.append(self.conn_name) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.conn_name is not None: + cmd.append(self.conn_name) + if self.ip4 is not None: + cmd.append('ip4') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('gw4') + cmd.append(self.gw4) + if self.ip6 is not None: + cmd.append('ip6') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('gw6') + cmd.append(self.gw6) + if self.autoconnect is not None: + 
cmd.append('autoconnect') + cmd.append(self.autoconnect) + return cmd + + def modify_connection_ethernet(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying ethernet interface + # To add an Ethernet connection with static IP configuration, issue a command as follows + # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present + # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1 + cmd.append('con') + cmd.append('mod') + cmd.append(self.conn_name) + if self.ip4 is not None: + cmd.append('ipv4.address') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('ipv4.gateway') + cmd.append(self.gw4) + if self.dns4 is not None: + cmd.append('ipv4.dns') + cmd.append(self.dns4) + if self.ip6 is not None: + cmd.append('ipv6.address') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('ipv6.gateway') + cmd.append(self.gw4) + if self.dns6 is not None: + cmd.append('ipv6.dns') + cmd.append(self.dns6) + if self.mtu is not None: + cmd.append('802-3-ethernet.mtu') + cmd.append(self.mtu) + if self.autoconnect is not None: + cmd.append('autoconnect') + cmd.append(self.autoconnect) + return cmd + + def create_connection_bridge(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating bridge interface + return cmd + + def modify_connection_bridge(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying bridge interface + return cmd + + def create_connection_vlan(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating ethernet interface + return cmd + + def modify_connection_vlan(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying ethernet interface + return cmd + + def create_connection(self): + cmd=[] + if self.type=='team': + # cmd=self.create_connection_team() + if (self.dns4 is not None) or (self.dns6 is not None): + 
cmd=self.create_connection_team() + self.execute_command(cmd) + cmd=self.modify_connection_team() + self.execute_command(cmd) + cmd=self.up_connection() + return self.execute_command(cmd) + elif (self.dns4 is None) or (self.dns6 is None): + cmd=self.create_connection_team() + return self.execute_command(cmd) + elif self.type=='team-slave': + if self.mtu is not None: + cmd=self.create_connection_team_slave() + self.execute_command(cmd) + cmd=self.modify_connection_team_slave() + self.execute_command(cmd) + # cmd=self.up_connection() + return self.execute_command(cmd) + else: + cmd=self.create_connection_team_slave() + return self.execute_command(cmd) + elif self.type=='bond': + if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): + cmd=self.create_connection_bond() + self.execute_command(cmd) + cmd=self.modify_connection_bond() + self.execute_command(cmd) + cmd=self.up_connection() + return self.execute_command(cmd) + else: + cmd=self.create_connection_bond() + return self.execute_command(cmd) + elif self.type=='bond-slave': + cmd=self.create_connection_bond_slave() + elif self.type=='ethernet': + if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): + cmd=self.create_connection_ethernet() + self.execute_command(cmd) + cmd=self.modify_connection_ethernet() + self.execute_command(cmd) + cmd=self.up_connection() + return self.execute_command(cmd) + else: + cmd=self.create_connection_ethernet() + return self.execute_command(cmd) + elif self.type=='bridge': + cmd=self.create_connection_bridge() + elif self.type=='vlan': + cmd=self.create_connection_vlan() + return self.execute_command(cmd) + + def remove_connection(self): + # self.down_connection() + cmd=[self.module.get_bin_path('nmcli', True)] + cmd.append('con') + cmd.append('del') + cmd.append(self.conn_name) + return self.execute_command(cmd) + + def modify_connection(self): + cmd=[] + if self.type=='team': + cmd=self.modify_connection_team() + elif 
self.type=='team-slave': + cmd=self.modify_connection_team_slave() + elif self.type=='bond': + cmd=self.modify_connection_bond() + elif self.type=='bond-slave': + cmd=self.modify_connection_bond_slave() + elif self.type=='ethernet': + cmd=self.modify_connection_ethernet() + elif self.type=='bridge': + cmd=self.modify_connection_bridge() + elif self.type=='vlan': + cmd=self.modify_connection_vlan() + return self.execute_command(cmd) + + +def main(): + # Parsing argument file + module=AnsibleModule( + argument_spec=dict( + autoconnect=dict(required=False, default=None, choices=['yes', 'no'], type='str'), + state=dict(required=True, choices=['present', 'absent'], type='str'), + conn_name=dict(required=True, type='str'), + master=dict(required=False, default=None, type='str'), + ifname=dict(required=False, default=None, type='str'), + type=dict(required=False, default=None, choices=['ethernet', 'team', 'team-slave', 'bond', 'bond-slave', 'bridge', 'vlan'], type='str'), + ip4=dict(required=False, default=None, type='str'), + gw4=dict(required=False, default=None, type='str'), + dns4=dict(required=False, default=None, type='str'), + ip6=dict(required=False, default=None, type='str'), + gw6=dict(required=False, default=None, type='str'), + dns6=dict(required=False, default=None, type='str'), + # Bond Specific vars + mode=dict(require=False, default="balance-rr", choices=["balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb"], type='str'), + miimon=dict(required=False, default=None, type='str'), + downdelay=dict(required=False, default=None, type='str'), + updelay=dict(required=False, default=None, type='str'), + arp_interval=dict(required=False, default=None, type='str'), + arp_ip_target=dict(required=False, default=None, type='str'), + # general usage + mtu=dict(required=False, default=None, type='str'), + mac=dict(required=False, default=None, type='str'), + # bridge specific vars + stp=dict(required=False, default='yes', 
choices=['yes', 'no'], type='str'), + priority=dict(required=False, default="128", type='str'), + slavepriority=dict(required=False, default="32", type='str'), + forwarddelay=dict(required=False, default="15", type='str'), + hellotime=dict(required=False, default="2", type='str'), + maxage=dict(required=False, default="20", type='str'), + ageingtime=dict(required=False, default="300", type='str'), + # vlan specific vars + vlanid=dict(required=False, default=None, type='str'), + vlandev=dict(required=False, default=None, type='str'), + flags=dict(required=False, default=None, type='str'), + ingress=dict(required=False, default=None, type='str'), + egress=dict(required=False, default=None, type='str'), + ), + supports_check_mode=True + ) + + nmcli=Nmcli(module) + + rc=None + out='' + err='' + result={} + result['conn_name']=nmcli.conn_name + result['state']=nmcli.state + + # check for issues + if nmcli.conn_name is None: + nmcli.module.fail_json(msg="You haven't specified a name for the connection") + # team-slave checks + if nmcli.type=='team-slave' and nmcli.master is None: + nmcli.module.fail_json(msg="You haven't specified a name for the master so we're not changing a thing") + if nmcli.type=='team-slave' and nmcli.ifname is None: + nmcli.module.fail_json(msg="You haven't specified a name for the connection") + + if nmcli.state=='absent': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err)=nmcli.down_connection() + (rc, out, err)=nmcli.remove_connection() + if rc!=0: + module.fail_json(name =('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc) + + elif nmcli.state=='present': + if nmcli.connection_exists(): + # modify connection (note: this function is check mode aware) + # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type)) + result['Exists']='Connections do exist so we are modifying them' + if module.check_mode: + module.exit_json(changed=True) 
+ (rc, out, err)=nmcli.modify_connection() + if not nmcli.connection_exists(): + result['Connection']=('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type)) + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err)=nmcli.create_connection() + if rc is not None and rc!=0: + module.fail_json(name=nmcli.conn_name, msg=err, rc=rc) + + if rc is None: + result['changed']=False + else: + result['changed']=True + if out: + result['stdout']=out + if err: + result['stderr']=err + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/network/openvswitch_bridge.py b/network/openvswitch_bridge.py index 551ca707a2d..b9ddff562c6 100644 --- a/network/openvswitch_bridge.py +++ b/network/openvswitch_bridge.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: openvswitch_bridge version_added: 1.4 -author: David Stygstra +author: "David Stygstra (@stygstra)" short_description: Manage Open vSwitch bridges requirements: [ ovs-vsctl ] description: diff --git a/network/openvswitch_port.py b/network/openvswitch_port.py index 66391937d1b..6f59f4b134b 100644 --- a/network/openvswitch_port.py +++ b/network/openvswitch_port.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: openvswitch_port version_added: 1.4 -author: David Stygstra +author: "David Stygstra (@stygstra)" short_description: Manage Open vSwitch ports requirements: [ ovs-vsctl ] description: diff --git a/network/snmp_facts.py b/network/snmp_facts.py old mode 100755 new mode 100644 index 85fc148cba5..81a91ee6eb2 --- a/network/snmp_facts.py +++ b/network/snmp_facts.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: snmp_facts version_added: "1.9" -author: Patrick Ogenstad (@networklore) +author: "Patrick Ogenstad (@ogenstad)" short_description: Retrive facts for a device using SNMP. 
description: - Retrieve facts for a device using SNMP, the facts will be diff --git a/notification/campfire.py b/notification/campfire.py index 31e69fc5459..2400ad3ba40 100644 --- a/notification/campfire.py +++ b/notification/campfire.py @@ -43,7 +43,7 @@ options: # informational: requirements for nodes requirements: [ urllib2, cgi ] -author: Adam Garside +author: "Adam Garside (@fabulops)" ''' EXAMPLES = ''' diff --git a/notification/flowdock.py b/notification/flowdock.py index 009487fb438..34dad8db375 100644 --- a/notification/flowdock.py +++ b/notification/flowdock.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: flowdock version_added: "1.2" -author: Matt Coddington +author: "Matt Coddington (@mcodd)" short_description: Send a message to a flowdock description: - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat) @@ -85,8 +85,7 @@ options: choices: ['yes', 'no'] version_added: 1.5.1 -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] +requirements: [ ] ''' EXAMPLES = ''' @@ -104,6 +103,8 @@ EXAMPLES = ''' tags=tag1,tag2,tag3 ''' +import urllib + # =========================================== # Module execution. 
# diff --git a/notification/grove.py b/notification/grove.py index 8f4ec42be58..4e4a0b5b684 100644 --- a/notification/grove.py +++ b/notification/grove.py @@ -25,11 +25,11 @@ options: required: true url: description: - - Service URL for the web client + - Service URL for the web client required: false icon_url: description: - - Icon for the service + - Icon for the service required: false validate_certs: description: @@ -39,7 +39,7 @@ options: default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 -author: Jonas Pfenniger +author: "Jonas Pfenniger (@zimbatm)" ''' EXAMPLES = ''' @@ -49,6 +49,8 @@ EXAMPLES = ''' message=deployed {{ target }} ''' +import urllib + BASE_URL = 'https://grove.io/api/notice/%s/' # ============================================================== diff --git a/notification/hall.py b/notification/hall.py new file mode 100755 index 00000000000..05c1a981b73 --- /dev/null +++ b/notification/hall.py @@ -0,0 +1,97 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Billy Kimble +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION = """ +module: hall +short_description: Send notification to Hall +description: + - "The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notication messages to rooms." 
+version_added: "2.0" +author: Billy Kimble (@bkimble) +options: + room_token: + description: + - "Room token provided to you by setting up the Ansible room integation on U(https://hall.com)" + required: true + msg: + description: + - The message you wish to deliver as a notifcation + required: true + title: + description: + - The title of the message + required: true + picture: + description: + - "The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)" + required: false +""" + +EXAMPLES = """ +- name: Send Hall notifiation + local_action: + module: hall + room_token: + title: Nginx + msg: Created virtual host file on {{ inventory_hostname }} + +- name: Send Hall notification if EC2 servers were created. + when: ec2.instances|length > 0 + local_action: + module: hall + room_token: + title: Server Creation + msg: "Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region." 
+ with_items: ec2.instances +""" + +HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s' + +def send_request_to_hall(module, room_token, payload): + headers = {'Content-Type': 'application/json'} + payload=module.jsonify(payload) + api_endpoint = HALL_API_ENDPOINT % (room_token) + response, info = fetch_url(module, api_endpoint, data=payload, headers=headers) + if info['status'] != 200: + secure_url = HALL_API_ENDPOINT % ('[redacted]') + module.fail_json(msg=" failed to send %s to %s: %s" % (payload, secure_url, info['msg'])) + +def main(): + module = AnsibleModule( + argument_spec = dict( + room_token = dict(type='str', required=True), + msg = dict(type='str', required=True), + title = dict(type='str', required=True), + picture = dict(type='str', default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'), + ) + ) + + room_token = module.params['room_token'] + message = module.params['msg'] + title = module.params['title'] + picture = module.params['picture'] + payload = {'title': title, 'message': message, 'picture': picture} + send_request_to_hall(module, room_token, payload) + module.exit_json(msg="OK") + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() diff --git a/notification/hipchat.py b/notification/hipchat.py index 4ff95b32bf6..32689965cf9 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -58,13 +58,12 @@ options: description: - API url if using a self-hosted hipchat server required: false - default: 'https://api.hipchat.com/v1/rooms/message' + default: 'https://api.hipchat.com/v1' version_added: 1.6.0 -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] -author: WAKAYAMA Shirou +requirements: [ ] +author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)" ''' EXAMPLES = ''' @@ -75,11 +74,19 @@ EXAMPLES = ''' # HipChat module specific support methods. 
# -MSG_URI = "https://api.hipchat.com/v1/rooms/message" +import urllib -def send_msg(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=MSG_URI): - '''sending message to hipchat''' +DEFAULT_URI = "https://api.hipchat.com/v1" + +MSG_URI_V1 = "/rooms/message" + +MSG_URI_V2 = "/room/{id_or_name}/message" +NOTIFY_URI_V2 = "/room/{id_or_name}/notification" + +def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', + color='yellow', notify=False, api=MSG_URI_V1): + '''sending message to hipchat v1 server''' + print "Sending message to v1 server" params = {} params['room_id'] = room @@ -94,8 +101,13 @@ def send_msg(module, token, room, msg_from, msg, msg_format='text', else: params['notify'] = 0 - url = api + "?auth_token=%s" % (token) + url = api + MSG_URI_V1 + "?auth_token=%s" % (token) data = urllib.urlencode(params) + + if module.check_mode: + # In check mode, exit before actually sending the message + module.exit_json(changed=False) + response, info = fetch_url(module, url, data=data) if info['status'] == 200: return response.read() @@ -103,6 +115,37 @@ def send_msg(module, token, room, msg_from, msg, msg_format='text', module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) +def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', + color='yellow', notify=False, api=MSG_URI_V2): + '''sending message to hipchat v2 server''' + print "Sending message to v2 server" + + headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'} + + body = dict() + body['message'] = msg + body['color'] = color + body['message_format'] = msg_format + + if notify: + POST_URL = api + NOTIFY_URI_V2 + else: + POST_URL = api + MSG_URI_V2 + + url = POST_URL.replace('{id_or_name}',room) + data = json.dumps(body) + + if module.check_mode: + # In check mode, exit before actually sending the message + module.exit_json(changed=False) + + response, info = fetch_url(module, url, 
data=data, headers=headers, method='POST') + if info['status'] == 200: + return response.read() + else: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + + # =========================================== # Module execution. # @@ -119,8 +162,8 @@ def main(): "purple", "gray", "random"]), msg_format=dict(default="text", choices=["text", "html"]), notify=dict(default=True, type='bool'), - validate_certs = dict(default='yes', type='bool'), - api = dict(default=MSG_URI), + validate_certs=dict(default='yes', type='bool'), + api=dict(default=DEFAULT_URI), ), supports_check_mode=True ) @@ -135,9 +178,12 @@ def main(): api = module.params["api"] try: - send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api) + if api.find('/v2') != -1: + send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) + else: + send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception, e: - module.fail_json(msg="unable to sent msg: %s" % e) + module.fail_json(msg="unable to send msg: %s" % e) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) diff --git a/notification/irc.py b/notification/irc.py index a90834f820d..e6852c8510a 100644 --- a/notification/irc.py +++ b/notification/irc.py @@ -47,6 +47,12 @@ options: - The message body. required: true default: null + topic: + description: + - Set the channel topic + required: false + default: null + version_added: 2.0 color: description: - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). 
@@ -80,7 +86,9 @@ options: # informational: requirements for nodes requirements: [ socket ] -author: Jan-Piet Mens, Matt Martz +author: + - '"Jan-Piet Mens (@jpmens)"' + - '"Matt Martz (@sivel)"' ''' EXAMPLES = ''' @@ -104,7 +112,7 @@ import ssl from time import sleep -def send_msg(channel, msg, server='localhost', port='6667', key=None, +def send_msg(channel, msg, server='localhost', port='6667', key=None, topic=None, nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False): '''send message to IRC''' @@ -161,6 +169,10 @@ def send_msg(channel, msg, server='localhost', port='6667', key=None, raise Exception('Timeout waiting for IRC JOIN response') sleep(0.5) + if topic is not None: + irc.send('TOPIC %s :%s\r\n' % (channel, topic)) + sleep(1) + irc.send('PRIVMSG %s :%s\r\n' % (channel, message)) sleep(1) irc.send('PART %s\r\n' % channel) @@ -184,6 +196,7 @@ def main(): "blue", "black", "none"]), channel=dict(required=True), key=dict(), + topic=dict(), passwd=dict(), timeout=dict(type='int', default=30), use_ssl=dict(type='bool', default=False) @@ -194,6 +207,7 @@ def main(): server = module.params["server"] port = module.params["port"] nick = module.params["nick"] + topic = module.params["topic"] msg = module.params["msg"] color = module.params["color"] channel = module.params["channel"] @@ -203,7 +217,7 @@ def main(): use_ssl = module.params["use_ssl"] try: - send_msg(channel, msg, server, port, key, nick, color, passwd, timeout, use_ssl) + send_msg(channel, msg, server, port, key, topic, nick, color, passwd, timeout, use_ssl) except Exception, e: module.fail_json(msg="unable to send to IRC: %s" % e) diff --git a/notification/jabber.py b/notification/jabber.py index 8a7eed37b33..1a19140a83d 100644 --- a/notification/jabber.py +++ b/notification/jabber.py @@ -1,5 +1,23 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# +# (c) 2015, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it 
under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see + DOCUMENTATION = ''' --- @@ -42,7 +60,7 @@ options: # informational: requirements for nodes requirements: [ xmpp ] -author: Brian Coca +author: "Brian Coca (@bcoca)" ''' EXAMPLES = ''' diff --git a/notification/mail.py b/notification/mail.py index eb61ed32744..8be9a589cbf 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -20,7 +20,7 @@ DOCUMENTATION = """ --- -author: Dag Wieers +author: "Dag Wieers (@dagwieers)" module: mail short_description: Send an email description: @@ -62,7 +62,6 @@ options: subject: description: - The subject of the email being sent. - aliases: [ msg ] required: true body: description: @@ -111,11 +110,27 @@ options: - The character set of email being sent default: 'us-ascii' required: false + subtype: + description: + - The minor mime type, can be either text or html. The major type is always text. + default: 'plain' + required: false + version_added: "2.0" """ EXAMPLES = ''' # Example playbook sending mail to root -- local_action: mail msg='System {{ ansible_hostname }} has been successfully provisioned.' +- local_action: mail subject='System {{ ansible_hostname }} has been successfully provisioned.' + +# Sending an e-mail using Gmail SMTP servers +- local_action: mail + host='smtp.gmail.com' + port=587 + username=username@gmail.com + password='mysecret' + to="John Smith " + subject='Ansible-report' + body='System {{ ansible_hostname }} has been successfully provisioned.' 
# Send e-mail to a bunch of users, attaching files - local_action: mail @@ -129,6 +144,13 @@ EXAMPLES = ''' attach="/etc/group /tmp/pavatar2.png" headers=Reply-To=john@example.com|X-Special="Something or other" charset=utf8 +# Sending an e-mail using the remote machine, not the Ansible controller node +- mail: + host='localhost' + port=25 + to="John Smith " + subject='Ansible-report' + body='System {{ ansible_hostname }} has been successfully provisioned.' ''' import os @@ -167,7 +189,8 @@ def main(): body = dict(default=None), attach = dict(default=None), headers = dict(default=None), - charset = dict(default='us-ascii') + charset = dict(default='us-ascii'), + subtype = dict(default='plain') ) ) @@ -184,6 +207,7 @@ def main(): attach_files = module.params.get('attach') headers = module.params.get('headers') charset = module.params.get('charset') + subtype = module.params.get('subtype') sender_phrase, sender_addr = parseaddr(sender) if not body: @@ -243,7 +267,7 @@ def main(): if len(cc_list) > 0: msg['Cc'] = ", ".join(cc_list) - part = MIMEText(body + "\n\n", _charset=charset) + part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset) msg.attach(part) if attach_files is not None: diff --git a/notification/mqtt.py b/notification/mqtt.py index d701bd9348a..c618ab69ae3 100644 --- a/notification/mqtt.py +++ b/notification/mqtt.py @@ -81,7 +81,7 @@ requirements: [ mosquitto ] notes: - This module requires a connection to an MQTT broker such as Mosquitto U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)). 
-author: Jan-Piet Mens +author: "Jan-Piet Mens (@jpmens)" ''' EXAMPLES = ''' diff --git a/notification/nexmo.py b/notification/nexmo.py index d4898c40cdb..89a246c0d90 100644 --- a/notification/nexmo.py +++ b/notification/nexmo.py @@ -24,7 +24,7 @@ short_description: Send a SMS via nexmo description: - Send a SMS message via nexmo version_added: 1.6 -author: Matt Martz +author: "Matt Martz (@sivel)" options: api_key: description: @@ -71,6 +71,7 @@ EXAMPLES = """ msg: "{{ inventory_hostname }} completed" """ +import urllib NEXMO_API = 'https://rest.nexmo.com/sms/json' diff --git a/notification/osx_say.py b/notification/osx_say.py index 39e3da88c19..7c0ba844583 100644 --- a/notification/osx_say.py +++ b/notification/osx_say.py @@ -37,7 +37,9 @@ options: What voice to use required: false requirements: [ say ] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan (@mpdehaan)" ''' EXAMPLES = ''' diff --git a/notification/pushbullet.py b/notification/pushbullet.py new file mode 100644 index 00000000000..52d785306ce --- /dev/null +++ b/notification/pushbullet.py @@ -0,0 +1,182 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +author: "Willy Barro (@willybarro)" +requirements: [ pushbullet.py ] +module: pushbullet +short_description: Sends notifications to Pushbullet +description: + - This module sends push notifications via Pushbullet to channels or devices. +version_added: "2.0" +options: + api_key: + description: + - Push bullet API token + required: true + channel: + description: + - The channel TAG you wish to broadcast a push notification, + as seen on the "My Channels" > "Edit your channel" at + Pushbullet page. + required: false + default: null + device: + description: + - The device NAME you wish to send a push notification, + as seen on the Pushbullet main page. + required: false + default: null + push_type: + description: + - Thing you wish to push. + required: false + default: note + choices: [ "note", "link" ] + title: + description: + - Title of the notification. + required: true + body: + description: + - Body of the notification, e.g. Details of the fault you're alerting. + required: false + +notes: + - Requires pushbullet.py Python package on the remote host. + You can install it via pip with ($ pip install pushbullet.py). + See U(https://github.com/randomchars/pushbullet.py) +''' + +EXAMPLES = ''' +# Sends a push notification to a device +- pushbullet: + api_key: "ABC123abc123ABC123abc123ABC123ab" + device: "Chrome" + title: "You may see this on Google Chrome" + +# Sends a link to a device +- pushbullet: + api_key: "ABC123abc123ABC123abc123ABC123ab" + device: "Chrome" + push_type: "link" + title: "Ansible Documentation" + body: "http://docs.ansible.com/" + +# Sends a push notification to a channel +- pushbullet: + api_key: "ABC123abc123ABC123abc123ABC123ab" + channel: "my-awesome-channel" + title: "Broadcasting a message to the #my-awesome-channel folks" + +# Sends a push notification with title and body to a channel +- pushbullet: + api_key: "ABC123abc123ABC123abc123ABC123ab" + channel: "my-awesome-channel" + title: "ALERT! 
Signup service is down" + body: "Error rate on signup service is over 90% for more than 2 minutes" +''' + +try: + from pushbullet import PushBullet + from pushbullet.errors import InvalidKeyError, PushError +except ImportError: + pushbullet_found = False +else: + pushbullet_found = True + +# =========================================== +# Main +# + +def main(): + module = AnsibleModule( + argument_spec = dict( + api_key = dict(type='str', required=True), + channel = dict(type='str', default=None), + device = dict(type='str', default=None), + push_type = dict(type='str', default="note", choices=['note', 'link']), + title = dict(type='str', required=True), + body = dict(type='str', default=None) + ), + mutually_exclusive = ( + ['channel', 'device'], + ), + supports_check_mode=True + ) + + api_key = module.params['api_key'] + channel = module.params['channel'] + device = module.params['device'] + push_type = module.params['push_type'] + title = module.params['title'] + body = module.params['body'] + + if not pushbullet_found: + module.fail_json(msg="Python 'pushbullet.py' module is required. Install via: $ pip install pushbullet.py") + + # Init pushbullet + try: + pb = PushBullet(api_key) + target = None + except InvalidKeyError: + module.fail_json(msg="Invalid api_key") + + # Checks for channel/device + if device is None and channel is None: + module.fail_json(msg="You need to provide a channel or a device.") + + # Search for given device + if device is not None: + devices_by_nickname = {} + for d in pb.devices: + devices_by_nickname[d.nickname] = d + + if device in devices_by_nickname: + target = devices_by_nickname[device] + else: + module.fail_json(msg="Device '%s' not found. 
Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys()))) + + # Search for given channel + if channel is not None: + channels_by_tag = {} + for c in pb.channels: + channels_by_tag[c.channel_tag] = c + + if channel in channels_by_tag: + target = channels_by_tag[channel] + else: + module.fail_json(msg="Channel '%s' not found. Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys()))) + + # If in check mode, exit saying that we succeeded + if module.check_mode: + module.exit_json(changed=False, msg="OK") + + # Send push notification + try: + target.push_note(title, body) + module.exit_json(changed=False, msg="OK") + except PushError as e: + module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e)) + + module.fail_json(msg="An unknown error has occurred") + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/notification/pushover.py b/notification/pushover.py new file mode 100644 index 00000000000..505917189e4 --- /dev/null +++ b/notification/pushover.py @@ -0,0 +1,106 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2012, Jim Richardson +# All rights reserved. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +### + +DOCUMENTATION = ''' +--- +module: pushover +version_added: "2.0" +short_description: Send notifications via u(https://pushover.net) +description: + - Send notifications via pushover, to subscriber list of devices, and email + addresses. Requires pushover app on devices. +notes: + - You will require a pushover.net account to use this module. But no account + is required to receive messages. +options: + msg: + description: + What message you wish to send. + required: true + app_token: + description: + Pushover issued token identifying your pushover app. + required: true + user_key: + description: + Pushover issued authentication key for your user. + required: true + pri: + description: Message priority (see u(https://pushover.net) for details.) + required: false + +author: "Jim Richardson (@weaselkeeper)" +''' + +EXAMPLES = ''' +- local_action: pushover msg="{{inventory_hostname}} has exploded in flames, + It is now time to panic" app_token=wxfdksl user_key=baa5fe97f2c5ab3ca8f0bb59 +''' + +import urllib +import httplib + + +class pushover(object): + ''' Instantiates a pushover object, use it to send notifications ''' + + def __init__(self): + self.host, self.port = 'api.pushover.net', 443 + + def run(self): + ''' Do, whatever it is, we do. 
''' + # parse config + conn = httplib.HTTPSConnection(self.host, self.port) + conn.request("POST", "/1/messages.json", + urllib.urlencode(self.options), + {"Content-type": "application/x-www-form-urlencoded"}) + conn.getresponse() + return + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + msg=dict(required=True), + app_token=dict(required=True), + user_key=dict(required=True), + pri=dict(required=False, default=0), + ), + ) + + msg_object = pushover() + msg_object.options = {} + msg_object.options['user'] = module.params['user_key'] + msg_object.options['token'] = module.params['app_token'] + msg_object.options['priority'] = module.params['pri'] + msg_object.options['message'] = module.params['msg'] + try: + msg_object.run() + except: + module.fail_json(msg='Unable to send msg via pushover') + + module.exit_json(msg=module.params['msg'], changed=False) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/notification/sendgrid.py b/notification/sendgrid.py new file mode 100644 index 00000000000..e1ae7b7749f --- /dev/null +++ b/notification/sendgrid.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Matt Makai +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +version_added: "2.0" +module: sendgrid +short_description: Sends an email with the SendGrid API +description: + - Sends an email with a SendGrid account through their API, not through + the SMTP service. +notes: + - This module is non-idempotent because it sends an email through the + external API. It is idempotent only in the case that the module fails. + - Like the other notification modules, this one requires an external + dependency to work. In this case, you'll need an active SendGrid + account. +options: + username: + description: + username for logging into the SendGrid account + required: true + password: + description: password that corresponds to the username + required: true + from_address: + description: + the address in the "from" field for the email + required: true + to_addresses: + description: + a list with one or more recipient email addresses + required: true + subject: + description: + the desired subject for the email + required: true + +author: "Matt Makai (@makaimc)" ''' + +EXAMPLES = ''' +# send an email to a single recipient that the deployment was successful +- sendgrid: + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "ansible@mycompany.com" + to_addresses: + - "ops@mycompany.com" + subject: "Deployment success." + body: "The most recent Ansible deployment was successful." + delegate_to: localhost + +# send an email to more than one recipient that the build failed +- sendgrid: + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "build@mycompany.com" + to_addresses: + - "ops@mycompany.com" + - "devteam@mycompany.com" + subject: "Build failure!" + body: "Unable to pull source repository from Git server." 
+ delegate_to: localhost +''' + +# ======================================= +# sendgrid module support methods +# +import urllib +import urllib2 + +import base64 + +def post_sendgrid_api(module, username, password, from_address, to_addresses, + subject, body): + SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" + AGENT = "Ansible" + data = {'api_user': username, 'api_key':password, + 'from':from_address, 'subject': subject, 'text': body} + encoded_data = urllib.urlencode(data) + to_addresses_api = '' + for recipient in to_addresses: + if isinstance(recipient, unicode): + recipient = recipient.encode('utf-8') + to_addresses_api += '&to[]=%s' % recipient + encoded_data += to_addresses_api + request = urllib2.Request(SENDGRID_URI) + request.add_header('User-Agent', AGENT) + request.add_header('Content-type', 'application/x-www-form-urlencoded') + request.add_header('Accept', 'application/json') + return urllib2.urlopen(request, encoded_data) + + +# ======================================= +# Main +# + +def main(): + module = AnsibleModule( + argument_spec=dict( + username=dict(required=True), + password=dict(required=True, no_log=True), + from_address=dict(required=True), + to_addresses=dict(required=True, type='list'), + subject=dict(required=True), + body=dict(required=True), + ), + supports_check_mode=True + ) + + username = module.params['username'] + password = module.params['password'] + from_address = module.params['from_address'] + to_addresses = module.params['to_addresses'] + subject = module.params['subject'] + body = module.params['body'] + + try: + response = post_sendgrid_api(module, username, password, + from_address, to_addresses, subject, body) + except Exception: + module.fail_json(msg="unable to send email through SendGrid API") + + module.exit_json(msg=subject, changed=False) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/notification/slack.py b/notification/slack.py index 
1ae748247f9..baabe4f58d2 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -24,7 +24,7 @@ short_description: Send Slack notifications description: - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration version_added: 1.6 -author: Ramon de la Fuente +author: "Ramon de la Fuente (@ramondelafuente)" options: domain: description: @@ -89,6 +89,17 @@ options: choices: - 'yes' - 'no' + color: + version_added: 2.0 + description: + - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message + required: false + default: 'normal' + choices: + - 'normal' + - 'good' + - 'warning' + - 'danger' """ EXAMPLES = """ @@ -111,16 +122,29 @@ EXAMPLES = """ link_names: 0 parse: 'none' +- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack + slack: + domain: future500.slack.com + token: thetokengeneratedbyslack + msg: "{{ inventory_hostname }} is alive!" 
+ color: good + username: "" + icon_url: "" """ OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s' -def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse): - payload = dict(text=text) - +def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color): + if color == 'normal': + payload = dict(text=text) + else: + payload = dict(attachments=[dict(text=text, color=color)]) if channel is not None: - payload['channel'] = channel if (channel[0] == '#') else '#'+channel + if (channel[0] == '#') or (channel[0] == '@'): + payload['channel'] = channel + else: + payload['channel'] = '#'+channel if username is not None: payload['username'] = username if icon_emoji is not None: @@ -161,8 +185,8 @@ def main(): icon_emoji = dict(type='str', default=None), link_names = dict(type='int', default=1, choices=[0,1]), parse = dict(type='str', default=None, choices=['none', 'full']), - validate_certs = dict(default='yes', type='bool'), + color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']) ) ) @@ -175,8 +199,9 @@ def main(): icon_emoji = module.params['icon_emoji'] link_names = module.params['link_names'] parse = module.params['parse'] + color = module.params['color'] - payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse) + payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color) do_notify_slack(module, domain, token, payload) module.exit_json(msg="OK") diff --git a/notification/sns.py b/notification/sns.py index f2ed178554e..70030d66196 100644 --- a/notification/sns.py +++ b/notification/sns.py @@ -24,7 +24,7 @@ short_description: Send Amazon Simple Notification Service (SNS) messages description: - The M(sns) module sends notifications to a topic on 
your Amazon SNS account version_added: 1.6 -author: Michael J. Schultz +author: "Michael J. Schultz (@mjschultz)" options: msg: description: @@ -105,6 +105,7 @@ from ansible.module_utils.ec2 import * try: import boto + import boto.ec2 import boto.sns except ImportError: print "failed=True msg='boto required for this module'" diff --git a/notification/twilio.py b/notification/twilio.py index 8969c28aa50..ee12d987e9e 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2014, Matt Makai +# (c) 2015, Matt Makai # # This file is part of Ansible # @@ -24,18 +24,20 @@ version_added: "1.6" module: twilio short_description: Sends a text message to a mobile phone through Twilio. description: - - Sends a text message to a phone number through an the Twilio SMS service. + - Sends a text message to a phone number through the Twilio messaging API. notes: - - Like the other notification modules, this one requires an external + - This module is non-idempotent because it sends a text message through the + external API. It is idempotent only in the case that the module fails. + - Like the other notification modules, this one requires an external dependency to work. In this case, you'll need a Twilio account with a purchased or verified phone number to send the text message. 
options: account_sid: description: - user's account id for Twilio found on the account page + user's Twilio account id found on the account page required: true auth_token: - description: user's authentication token for Twilio found on the account page + description: user's Twilio authentication token required: true msg: description: @@ -43,58 +45,87 @@ options: required: true to_number: description: - what phone number to send the text message to, format +15551112222 + one or more phone numbers to send the text message to, + format +15551112222 required: true from_number: description: - what phone number to send the text message from, format +15551112222 + the Twilio number to send the text message from, format +15551112222 required: true - -requirements: [ urllib, urllib2 ] -author: Matt Makai + media_url: + description: + a URL with a picture, video or sound clip to send with an MMS + (multimedia message) instead of a plain SMS + required: false + +author: "Matt Makai (@makaimc)" ''' EXAMPLES = ''' -# send a text message from the local server about the build status to (555) 303 5681 -# note: you have to have purchased the 'from_number' on your Twilio account -- local_action: text msg="All servers with webserver role are now configured." - account_sid={{ twilio_account_sid }} - auth_token={{ twilio_auth_token }} - from_number=+15552014545 to_number=+15553035681 - -# send a text message from a server to (555) 111 3232 -# note: you have to have purchased the 'from_number' on your Twilio account -- text: msg="This server's configuration is now complete." - account_sid={{ twilio_account_sid }} - auth_token={{ twilio_auth_token }} - from_number=+15553258899 to_number=+15551113232 - +# send an SMS about the build status to (555) 303 5681 +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account +- twilio: + msg: "All servers with webserver role are now configured." 
+ account_sid: "ACXXXXXXXXXXXXXXXXX" + auth_token: "ACXXXXXXXXXXXXXXXXX" + from_number: "+15552014545" + to_number: "+15553035681" + delegate_to: localhost + +# send an SMS to multiple phone numbers about the deployment +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account +- twilio: + msg: "This server's configuration is now complete." + account_sid: "ACXXXXXXXXXXXXXXXXX" + auth_token: "ACXXXXXXXXXXXXXXXXX" + from_number: "+15553258899" + to_number: + - "+15551113232" + - "+12025551235" + - "+19735559010" + delegate_to: localhost + +# send an MMS to a single recipient with an update on the deployment +# and an image of the results +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account +- twilio: + msg: "Deployment complete!" + account_sid: "ACXXXXXXXXXXXXXXXXX" + auth_token: "ACXXXXXXXXXXXXXXXXX" + from_number: "+15552014545" + to_number: "+15553035681" + media_url: "https://demo.twilio.com/logo.png" + delegate_to: localhost ''' # ======================================= -# text module support methods +# twilio module support methods # -try: - import urllib, urllib2 -except ImportError: - module.fail_json(msg="urllib and urllib2 are required") +import urllib +import urllib2 import base64 -def post_text(module, account_sid, auth_token, msg, from_number, to_number): +def post_twilio_api(module, account_sid, auth_token, msg, from_number, + to_number, media_url=None): URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ % (account_sid,) - AGENT = "Ansible/1.5" + AGENT = "Ansible" data = {'From':from_number, 'To':to_number, 'Body':msg} + if media_url: + data['MediaUrl'] = media_url encoded_data = urllib.urlencode(data) request = urllib2.Request(URI) base64string = base64.encodestring('%s:%s' % \ (account_sid, auth_token)).replace('\n', '') request.add_header('User-Agent', AGENT) 
request.add_header('Content-type', 'application/x-www-form-urlencoded') - request.add_header('Accept', 'application/ansible') + request.add_header('Accept', 'application/json') request.add_header('Authorization', 'Basic %s' % base64string) return urllib2.urlopen(request, encoded_data) @@ -112,23 +143,29 @@ def main(): msg=dict(required=True), from_number=dict(required=True), to_number=dict(required=True), + media_url=dict(default=None, required=False), ), supports_check_mode=True ) - + account_sid = module.params['account_sid'] auth_token = module.params['auth_token'] msg = module.params['msg'] from_number = module.params['from_number'] to_number = module.params['to_number'] + media_url = module.params['media_url'] + + if not isinstance(to_number, list): + to_number = [to_number] - try: - response = post_text(module, account_sid, auth_token, msg, - from_number, to_number) - except Exception, e: - module.fail_json(msg="unable to send text message to %s" % to_number) + for number in to_number: + try: + post_twilio_api(module, account_sid, auth_token, msg, + from_number, number, media_url) + except Exception: + module.fail_json(msg="unable to send message to %s" % number) - module.exit_json(msg=msg, changed=False) + module.exit_json(msg=msg, changed=False) # import module snippets from ansible.module_utils.basic import * diff --git a/notification/typetalk.py b/notification/typetalk.py index b987acbe837..002c8b5cc85 100644 --- a/notification/typetalk.py +++ b/notification/typetalk.py @@ -25,23 +25,17 @@ options: description: - message body required: true -requirements: [ urllib, urllib2, json ] -author: Takashi Someda +requirements: [ json ] +author: "Takashi Someda (@tksmd)" ''' EXAMPLES = ''' - typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed" ''' -try: - import urllib -except ImportError: - urllib = None +import urllib -try: - import urllib2 -except ImportError: - urllib2 = None +import urllib2 try: import json @@ -96,8 +90,8 @@ def 
main(): supports_check_mode=False ) - if not (urllib and urllib2 and json): - module.fail_json(msg="urllib, urllib2 and json modules are required") + if not json: + module.fail_json(msg="json module is required") client_id = module.params["client_id"] client_secret = module.params["client_secret"] diff --git a/packaging/dpkg_selections b/packaging/dpkg_selections new file mode 100644 index 00000000000..f09ff9a9f00 --- /dev/null +++ b/packaging/dpkg_selections @@ -0,0 +1,60 @@ +#!/usr/bin/python + +DOCUMENTATION = ''' +--- +module: dpkg_selections +short_description: Dpkg package selection selections +description: + - Change dpkg package selection state via --get-selections and --set-selections. +version_added: "2.0" +author: Brian Brazil +options: + name: + description: + - Name of the package + required: true + selection: + description: + - The selection state to set the package to. + choices: [ 'install', 'hold', 'deinstall', 'purge' ] + required: true +notes: + - This module won't cause any packages to be installed/removed/purged, use the C(apt) module for that. +''' +EXAMPLES = ''' +# Prevent python from being upgraded. +- dpkg_selections: name=python selection=hold +''' + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + selection = dict(choices=['install', 'hold', 'deinstall', 'purge']) + ), + supports_check_mode=True, + ) + + dpkg = module.get_bin_path('dpkg', True) + + name = module.params['name'] + selection = module.params['selection'] + + # Get current settings. 
+ rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True) + if not out: + current = 'not present' + else: + current = out.split()[1] + + changed = current != selection + + if module.check_mode or not changed: + module.exit_json(changed=changed, before=current, after=selection) + + module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True) + module.exit_json(changed=changed, before=current, after=selection) + + +from ansible.module_utils.basic import * +main() diff --git a/packaging/language/bower.py b/packaging/language/bower.py index 3fccf51056b..7af8136a445 100644 --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -25,7 +25,7 @@ short_description: Manage bower packages with bower description: - Manage bower packages with bower version_added: 1.9 -author: Michael Warkentin +author: "Michael Warkentin (@mwarkentin)" options: name: description: @@ -86,7 +86,7 @@ class Bower(object): def _exec(self, args, run_in_check_mode=False, check_rc=True): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = ["bower"] + args + cmd = ["bower"] + args + ['--config.interactive=false', '--allow-root'] if self.name: cmd.append(self.name_version) @@ -154,7 +154,7 @@ def main(): name = module.params['name'] offline = module.params['offline'] - path = module.params['path'] + path = os.path.expanduser(module.params['path']) state = module.params['state'] version = module.params['version'] diff --git a/packaging/language/bundler.py b/packaging/language/bundler.py new file mode 100644 index 00000000000..82ef2838a9a --- /dev/null +++ b/packaging/language/bundler.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Tim Hoiberg +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either 
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2015, Tim Hoiberg
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: bundler
short_description: Manage Ruby Gem dependencies with Bundler
description:
  - Manage installation and Gem version dependencies for Ruby using the Bundler gem
version_added: "2.0"
options:
  executable:
    description:
      - The path to the bundler executable
    required: false
    default: null
  state:
    description:
      - The desired state of the Gem bundle. C(latest) updates gems to the most recent, acceptable version
    required: false
    choices: [present, latest]
    default: present
  chdir:
    description:
      - The directory to execute the bundler commands from. This directory needs to contain a valid Gemfile or .bundle/ directory
    required: false
    default: temporary working directory
  exclude_groups:
    description:
      - A list of Gemfile groups to exclude during operations. This only applies when state is C(present). Bundler considers this a 'remembered'
        property for the Gemfile and will automatically exclude groups in future operations even if C(exclude_groups) is not set
    required: false
    default: null
  clean:
    description:
      - Only applies if state is C(present). If set removes any gems on the target host that are not in the gemfile
    required: false
    choices: [yes, no]
    default: "no"
  gemfile:
    description:
      - Only applies if state is C(present). The path to the gemfile to use to install gems.
    required: false
    default: Gemfile in current directory
  local:
    description:
      - If set only installs gems from the cache on the target host
    required: false
    choices: [yes, no]
    default: "no"
  deployment_mode:
    description:
      - Only applies if state is C(present). If set it will only install gems that are in the default or production groups. Requires a Gemfile.lock
        file to have been created prior
    required: false
    choices: [yes, no]
    default: "no"
  user_install:
    description:
      - Only applies if state is C(present). Installs gems in the local user's cache or for all users
    required: false
    choices: [yes, no]
    default: "yes"
  gem_path:
    description:
      - Only applies if state is C(present). Specifies the directory to install the gems into. If C(chdir) is set then this path is relative to C(chdir)
    required: false
    default: RubyGems gem paths
  binstub_directory:
    description:
      - Only applies if state is C(present). Specifies the directory to install any gem bins files to. When executed the bin files will run within
        the context of the Gemfile and fail if any required gem dependencies are not installed. If C(chdir) is set then this path is relative to C(chdir)
    required: false
    default: null
  extra_args:
    description:
      - A space separated string of additional commands that can be applied to the Bundler command. Refer to the Bundler documentation for more
        information
    required: false
    default: null
author: Tim Hoiberg
'''

EXAMPLES = '''
# Installs gems from a Gemfile in the current directory
- bundler: state=present executable=~/.rvm/gems/2.1.5/bin/bundle

# Excludes the production group from installing
- bundler: state=present exclude_groups=production

# Only install gems from the default and production groups
- bundler: state=present deployment_mode=yes

# Installs gems using a Gemfile in another directory
- bundler: state=present gemfile=../rails_project/Gemfile

# Updates Gemfile in another directory
- bundler: state=latest chdir=~/rails_project
'''


def get_bundler_executable(module):
    """Return the bundler command as an argv list.

    Honours an explicitly configured executable (which may embed extra
    arguments, hence the split), otherwise falls back to 'bundle' on PATH.
    """
    if module.params.get('executable'):
        return module.params.get('executable').split(' ')
    else:
        return [module.get_bin_path('bundle', True)]


def main():
    module = AnsibleModule(
        argument_spec=dict(
            executable=dict(default=None, required=False),
            state=dict(default='present', required=False, choices=['present', 'latest']),
            chdir=dict(default=None, required=False),
            exclude_groups=dict(default=None, required=False, type='list'),
            clean=dict(default=False, required=False, type='bool'),
            gemfile=dict(default=None, required=False),
            local=dict(default=False, required=False, type='bool'),
            deployment_mode=dict(default=False, required=False, type='bool'),
            user_install=dict(default=True, required=False, type='bool'),
            gem_path=dict(default=None, required=False),
            binstub_directory=dict(default=None, required=False),
            extra_args=dict(default=None, required=False),
        ),
        supports_check_mode=True
    )

    executable = module.params.get('executable')
    state = module.params.get('state')
    chdir = module.params.get('chdir')
    exclude_groups = module.params.get('exclude_groups')
    clean = module.params.get('clean')
    gemfile = module.params.get('gemfile')
    local = module.params.get('local')
    deployment_mode = module.params.get('deployment_mode')
    user_install = module.params.get('user_install')
    # BUGFIX: this previously read the undeclared key 'gem_install_path',
    # which always returned None, so --path was silently never passed.
    gem_path = module.params.get('gem_path')
    binstub_directory = module.params.get('binstub_directory')
    extra_args = module.params.get('extra_args')

    cmd = get_bundler_executable(module)

    # In check mode, 'bundle check' reports (without installing) whether the
    # Gemfile's dependencies are satisfied; a non-zero rc means a real run
    # would change something.
    if module.check_mode:
        cmd.append('check')
        rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)

        module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)

    if state == 'present':
        cmd.append('install')
        if exclude_groups:
            cmd.extend(['--without', ':'.join(exclude_groups)])
        if clean:
            cmd.append('--clean')
        if gemfile:
            cmd.extend(['--gemfile', gemfile])
        if local:
            cmd.append('--local')
        if deployment_mode:
            cmd.append('--deployment')
        if not user_install:
            cmd.append('--system')
        if gem_path:
            cmd.extend(['--path', gem_path])
        if binstub_directory:
            cmd.extend(['--binstubs', binstub_directory])
    else:
        cmd.append('update')
        if local:
            cmd.append('--local')

    if extra_args:
        cmd.extend(extra_args.split(' '))

    rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)

    # Bundler prints "Installing <gem>" lines only when it actually installs
    # something; their absence means the bundle was already satisfied.
    module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)


from ansible.module_utils.basic import *
main()
+ required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [ "ignore-platform-reqs" ] requirements: - php - composer installed in bin path (recommended /usr/local/bin) @@ -116,42 +124,49 @@ def composer_install(module, command, options): def main(): module = AnsibleModule( argument_spec = dict( - command = dict(default="install", type="str", required=False), - working_dir = dict(aliases=["working-dir"], required=True), - prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]), - prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]), - no_dev = dict(default="yes", type="bool", aliases=["no-dev"]), - no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]), - no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]), - optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]), + command = dict(default="install", type="str", required=False), + working_dir = dict(aliases=["working-dir"], required=True), + prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]), + prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]), + no_dev = dict(default="yes", type="bool", aliases=["no-dev"]), + no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]), + no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]), + optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]), + ignore_platform_reqs = dict(default="no", type="bool", aliases=["ignore-platform-reqs"]), ), supports_check_mode=True ) - module.params["working_dir"] = os.path.abspath(module.params["working_dir"]) + options = [] - options = set([]) # Default options - options.add("--no-ansi") - options.add("--no-progress") - options.add("--no-interaction") + options.append('--no-ansi') + options.append('--no-progress') + options.append('--no-interaction') - if module.check_mode: - options.add("--dry-run") - del module.params['CHECKMODE'] + 
options.extend(['--working-dir', os.path.abspath(module.params['working_dir'])]) - # Get composer command with fallback to default + # Get composer command with fallback to default command = module.params['command'] - del module.params['command']; # Prepare options - for i in module.params: - opt = "--%s" % i.replace("_","-") - p = module.params[i] - if isinstance(p, (bool)) and p: - options.add(opt) - elif isinstance(p, (str)): - options.add("%s=%s" % (opt, p)) + if module.params['prefer_source']: + options.append('--prefer-source') + if module.params['prefer_dist']: + options.append('--prefer-dist') + if module.params['no_dev']: + options.append('--no-dev') + if module.params['no_scripts']: + options.append('--no-scripts') + if module.params['no_plugins']: + options.append('--no-plugins') + if module.params['optimize_autoloader']: + options.append('--optimize-autoloader') + if module.params['ignore_platform_reqs']: + options.append('--ignore-platform-reqs') + + if module.check_mode: + options.append('--dry-run') rc, out, err = composer_install(module, command, options) diff --git a/packaging/language/cpanm.py b/packaging/language/cpanm.py index ec344b7aa9b..02b306b669c 100644 --- a/packaging/language/cpanm.py +++ b/packaging/language/cpanm.py @@ -73,7 +73,7 @@ examples: description: Install I(Dancer) perl package from a specific mirror notes: - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. 
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2014, Chris Schmidt
#
# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
# as a reference and starting point.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.

__author__ = 'cschmidt'

from lxml import etree
from urllib2 import Request, urlopen, URLError, HTTPError
import os
import hashlib
import sys
import base64

DOCUMENTATION = '''
---
module: maven_artifact
short_description: Downloads an Artifact from a Maven Repository
version_added: "2.0"
description:
    - Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve
    - snapshots or release versions of the artifact and will resolve the latest available version if one is not
    - available.
author: "Chris Schmidt (@chrisisbeef)"
requirements:
    - "python >= 2.6"
    - lxml
options:
    group_id:
        description: The Maven groupId coordinate
        required: true
    artifact_id:
        description: The maven artifactId coordinate
        required: true
    version:
        description: The maven version coordinate
        required: false
        default: latest
    classifier:
        description: The maven classifier coordinate
        required: false
        default: null
    extension:
        description: The maven type/extension coordinate
        required: false
        default: jar
    repository_url:
        description: The URL of the Maven Repository to download from
        required: false
        default: http://repo1.maven.org/maven2
    username:
        description: The username to authenticate as to the Maven Repository
        required: false
        default: null
    password:
        description: The password to authenticate with to the Maven Repository
        required: false
        default: null
    dest:
        description: The path where the artifact should be written to
        required: true
        default: false
    state:
        description: The desired state of the artifact
        required: true
        default: present
        choices: [present,absent]
'''

EXAMPLES = '''
# Download the latest version of the commons-collections artifact from Maven Central
- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections dest=/tmp/commons-collections-latest.jar

# Download Apache Commons-Collections 3.2 from Maven Central
- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 dest=/tmp/commons-collections-3.2.jar

# Download an artifact from a private repository requiring authentication
- maven_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass dest=/tmp/library-name-latest.jar

# Download a WAR File to the Tomcat webapps directory to be deployed
- maven_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven dest=/var/lib/tomcat7/webapps/web-app.war
'''


class Artifact(object):
    """Value object holding a full set of Maven coordinates."""

    def __init__(self, group_id, artifact_id, version, classifier=None, extension='jar'):
        if not group_id:
            raise ValueError("group_id must be set")
        if not artifact_id:
            raise ValueError("artifact_id must be set")

        self.group_id = group_id
        self.artifact_id = artifact_id
        self.version = version
        self.classifier = classifier

        if not extension:
            self.extension = "jar"
        else:
            self.extension = extension

    def is_snapshot(self):
        """True when the version names a -SNAPSHOT (mutable) artifact."""
        return self.version and self.version.endswith("SNAPSHOT")

    def path(self, with_version=True):
        """Repository-relative directory path for this artifact."""
        base = self.group_id.replace(".", "/") + "/" + self.artifact_id
        if with_version and self.version:
            return base + "/" + self.version
        else:
            return base

    def _generate_filename(self):
        if not self.classifier:
            return self.artifact_id + "." + self.extension
        else:
            return self.artifact_id + "-" + self.classifier + "." + self.extension

    def get_filename(self, filename=None):
        """Resolve the local filename: generate one, or complete a directory."""
        if not filename:
            filename = self._generate_filename()
        elif os.path.isdir(filename):
            filename = os.path.join(filename, self._generate_filename())
        return filename

    def __str__(self):
        # Standard Maven coordinate notation, omitting default components.
        if self.classifier:
            return "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
        elif self.extension != "jar":
            return "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
        else:
            return "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)

    @staticmethod
    def parse(input):
        """Parse 'group:artifact[:extension[:classifier]]:version' notation."""
        parts = input.split(":")
        if len(parts) >= 3:
            g = parts[0]
            a = parts[1]
            v = parts[len(parts) - 1]
            t = None
            c = None
            if len(parts) == 4:
                t = parts[2]
            if len(parts) == 5:
                t = parts[2]
                c = parts[3]
            return Artifact(g, a, v, c, t)
        else:
            return None


class MavenDownloader:
    """Resolves and downloads artifacts from a Maven 2 repository layout."""

    def __init__(self, base="http://repo1.maven.org/maven2", username=None, password=None):
        if base.endswith("/"):
            base = base.rstrip("/")
        self.base = base
        self.user_agent = "Maven Artifact Downloader/1.0"
        self.username = username
        self.password = password

    def _find_latest_version_available(self, artifact):
        # maven-metadata.xml lists all deployed versions; the last entry is
        # the most recent.
        path = "/%s/maven-metadata.xml" % (artifact.path(False))
        xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
        v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
        if v:
            return v[0]

    def find_uri_for_artifact(self, artifact):
        """Return the concrete download URI, resolving snapshot timestamps."""
        if artifact.is_snapshot():
            path = "/%s/maven-metadata.xml" % (artifact.path())
            xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
            timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0]
            buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
            return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber))
        else:
            return self._uri_for_artifact(artifact)

    def _uri_for_artifact(self, artifact, version=None):
        if artifact.is_snapshot() and not version:
            raise ValueError("Expected unique version for snapshot artifact " + str(artifact))
        elif not artifact.is_snapshot():
            version = artifact.version
        if artifact.classifier:
            return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension

        return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "." + artifact.extension

    def _request(self, url, failmsg, f):
        """GET url (with optional basic auth) and apply f to the response."""
        if not self.username:
            headers = {"User-Agent": self.user_agent}
        else:
            headers = {
                "User-Agent": self.user_agent,
                "Authorization": "Basic " + base64.b64encode(self.username + ":" + self.password)
            }
        req = Request(url, None, headers)
        try:
            response = urlopen(req)
        except HTTPError as e:
            # BUGFIX: the original concatenated str(e) + "for URL" with no
            # separating space, producing garbled error messages.
            raise ValueError(failmsg + " because of " + str(e) + " for URL " + url)
        except URLError as e:
            raise ValueError(failmsg + " because of " + str(e) + " for URL " + url)
        else:
            return f(response)

    def download(self, artifact, filename=None):
        """Download artifact to filename unless the local MD5 already matches."""
        filename = artifact.get_filename(filename)
        if not artifact.version or artifact.version == "latest":
            artifact = Artifact(artifact.group_id, artifact.artifact_id, self._find_latest_version_available(artifact),
                                artifact.classifier, artifact.extension)

        url = self.find_uri_for_artifact(artifact)
        if not self.verify_md5(filename, url + ".md5"):
            response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r)
            if response:
                # BUGFIX: artifacts are binary (jar/war/...); must open in
                # binary mode, not text mode 'w'.
                with open(filename, 'wb') as f:
                    self._write_chunks(response, f, report_hook=self.chunk_report)
                return True
            else:
                return False
        else:
            return True

    def chunk_report(self, bytes_so_far, chunk_size, total_size):
        """Progress hook: print a carriage-returned download percentage."""
        percent = float(bytes_so_far) / total_size
        percent = round(percent * 100, 2)
        sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
                         (bytes_so_far, total_size, percent))

        if bytes_so_far >= total_size:
            sys.stdout.write('\n')

    def _write_chunks(self, response, file, chunk_size=8192, report_hook=None):
        """Stream the HTTP response to file in chunks; return bytes written."""
        total_size = response.info().getheader('Content-Length').strip()
        total_size = int(total_size)
        bytes_so_far = 0

        while 1:
            chunk = response.read(chunk_size)
            bytes_so_far += len(chunk)

            if not chunk:
                break

            file.write(chunk)
            if report_hook:
                report_hook(bytes_so_far, chunk_size, total_size)

        return bytes_so_far

    def verify_md5(self, file, remote_md5):
        """True when the local file exists and matches the remote .md5 digest."""
        if not os.path.exists(file):
            return False
        else:
            local_md5 = self._local_md5(file)
            remote = self._request(remote_md5, "Failed to download MD5", lambda r: r.read())
            return local_md5 == remote

    def _local_md5(self, file):
        md5 = hashlib.md5()
        with open(file, 'rb') as f:
            # Sentinel must be an empty *byte* string for a binary stream.
            for chunk in iter(lambda: f.read(8192), b''):
                md5.update(chunk)
        return md5.hexdigest()


def main():
    module = AnsibleModule(
        argument_spec = dict(
            group_id = dict(default=None),
            artifact_id = dict(default=None),
            version = dict(default=None),
            classifier = dict(default=None),
            extension = dict(default=None),
            repository_url = dict(default=None),
            username = dict(default=None),
            password = dict(default=None),
            state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state
            dest = dict(default=None),
        )
    )

    group_id = module.params["group_id"]
    artifact_id = module.params["artifact_id"]
    version = module.params["version"]
    classifier = module.params["classifier"]
    extension = module.params["extension"]
    repository_url = module.params["repository_url"]
    repository_username = module.params["username"]
    repository_password = module.params["password"]
    state = module.params["state"]
    dest = module.params["dest"]

    if not repository_url:
        repository_url = "http://repo1.maven.org/maven2"

    downloader = MavenDownloader(repository_url, repository_username, repository_password)

    try:
        artifact = Artifact(group_id, artifact_id, version, classifier, extension)
    except ValueError as e:
        module.fail_json(msg=e.args[0])

    prev_state = "absent"
    if os.path.isdir(dest):
        # NOTE(review): when version/extension are omitted this builds a name
        # like "<artifact>-None.None" instead of the resolved version —
        # confirm intended behavior before relying on directory dests.
        dest = dest + "/" + artifact_id + "-" + version + "." + extension
        if os.path.lexists(dest):
            prev_state = "present"
    else:
        path = os.path.dirname(dest)
        if not os.path.exists(path):
            os.makedirs(path)

    if prev_state == "present":
        module.exit_json(dest=dest, state=state, changed=False)

    try:
        if downloader.download(artifact, dest):
            module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True)
        else:
            module.fail_json(msg="Unable to download the artifact")
    except ValueError as e:
        module.fail_json(msg=e.args[0])


# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-

# (c) 2012, Afterburn
# (c) 2013, Aaron Bull Schaefer
# (c) 2015, Jonathan Lestrelin
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: pear
short_description: Manage pear/pecl packages
description:
    - Manage PHP packages with the pear package manager.
version_added: 2.0
author:
    - "'jonathan.lestrelin' "
options:
    name:
        description:
            - Name of the package to install, upgrade, or remove.
        required: true

    state:
        description:
            - Desired state of the package.
        required: false
        default: "present"
        choices: ["present", "absent", "latest"]
'''

EXAMPLES = '''
# Install pear package
- pear: name=Net_URL2 state=present

# Install pecl package
- pear: name=pecl/json_post state=present

# Upgrade package
- pear: name=Net_URL2 state=latest

# Remove packages
- pear: name=Net_URL2,pecl/json_post state=absent
'''

import os


def get_local_version(pear_output):
    """Take pear remoteinfo output and get the installed version"""
    lines = pear_output.split('\n')
    for line in lines:
        if 'Installed ' in line:
            installed = line.rsplit(None, 1)[-1].strip()
            # '-' means pear reports the package as not installed.
            if installed == '-': continue
            return installed
    return None


def get_repository_version(pear_output):
    """Take pear remote-info output and get the latest version"""
    lines = pear_output.split('\n')
    for line in lines:
        if 'Latest ' in line:
            return line.rsplit(None, 1)[-1].strip()
    return None


def query_package(module, name, state="present"):
    """Query the package status in both the local system and the repository.
    Returns a boolean to indicate if the package is installed,
    and a second boolean to indicate if the package is up-to-date."""
    if state == "present":
        lcmd = "pear info %s" % (name)
        lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
        if lrc != 0:
            # package is not installed locally
            return False, False

        # 'remote-info' reports both the locally installed and the latest
        # repository version, so one call serves both comparisons below.
        rcmd = "pear remote-info %s" % (name)
        rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)

        # get the version installed locally (if any)
        lversion = get_local_version(rstdout)

        # get the version in the repository
        rversion = get_repository_version(rstdout)

        if rrc == 0:
            # Return True to indicate that the package is installed locally,
            # and the result of the version number comparison
            # to determine if the package is up-to-date.
            return True, (lversion == rversion)

    return False, False


def remove_packages(module, packages):
    remove_c = 0
    # Using a for loop in case of error, we can report the package that failed
    for package in packages:
        # Query the package first, to see if we even need to remove
        installed, updated = query_package(module, package)
        if not installed:
            continue

        cmd = "pear uninstall %s" % (package)
        rc, stdout, stderr = module.run_command(cmd, check_rc=False)

        if rc != 0:
            module.fail_json(msg="failed to remove %s" % (package))

        remove_c += 1

    if remove_c > 0:

        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)

    module.exit_json(changed=False, msg="package(s) already absent")


def install_packages(module, state, packages):
    install_c = 0

    for i, package in enumerate(packages):
        # if the package is installed and state == present
        # or state == latest and is up-to-date then skip
        installed, updated = query_package(module, package)
        if installed and (state == 'present' or (state == 'latest' and updated)):
            continue

        if state == 'present':
            command = 'install'

        if state == 'latest':
            command = 'upgrade'

        cmd = "pear %s %s" % (command, package)
        rc, stdout, stderr = module.run_command(cmd, check_rc=False)

        if rc != 0:
            module.fail_json(msg="failed to install %s" % (package))

        install_c += 1

    if install_c > 0:
        module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))

    module.exit_json(changed=False, msg="package(s) already installed")


def check_packages(module, packages, state):
    """Check-mode helper: report what a real run would change, then exit."""
    would_be_changed = []
    for package in packages:
        installed, updated = query_package(module, package)
        if ((state in ["present", "latest"] and not installed) or
                (state == "absent" and installed) or
                (state == "latest" and not updated)):
            would_be_changed.append(package)
    if would_be_changed:
        if state == "absent":
            state = "removed"
        module.exit_json(changed=True, msg="%s package(s) would be %s" % (
            len(would_be_changed), state))
    else:
        # BUGFIX: the original passed the misspelled kwarg 'change=False',
        # so check mode never reported 'changed' correctly in this branch.
        module.exit_json(changed=False, msg="package(s) already %s" % state)


def exe_exists(program):
    """True when an executable named program exists on PATH."""
    for path in os.environ["PATH"].split(os.pathsep):
        path = path.strip('"')
        exe_file = os.path.join(path, program)
        if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
            return True

    return False


def main():
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(aliases=['pkg']),
            state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed'])),
        required_one_of = [['name']],
        supports_check_mode = True)

    if not exe_exists("pear"):
        module.fail_json(msg="cannot find pear executable in PATH")

    p = module.params

    # normalize the state parameter
    if p['state'] in ['present', 'installed']:
        p['state'] = 'present'
    elif p['state'] in ['absent', 'removed']:
        p['state'] = 'absent'

    if p['name']:
        pkgs = p['name'].split(',')

    # (removed dead code: a pkg_files list of Nones was built here and
    # never read)

    if module.check_mode:
        check_packages(module, pkgs, p['state'])

    if p['state'] in ['present', 'latest']:
        install_packages(module, p['state'], pkgs)
    elif p['state'] == 'absent':
        remove_packages(module, pkgs)


# import module snippets
from ansible.module_utils.basic import *

main()
Please install the dnf-utils package.") results = dict(results=list_stuff(module, params['conf_file'], params['list'])) module.exit_json(**results) diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index 2ecac0c4ace..91888ba6bca 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -22,7 +22,9 @@ DOCUMENTATION = ''' --- module: homebrew -author: Andrew Dunham and Daniel Jaouen +author: + - "Daniel Jaouen (@danieljaouen)" + - "Andrew Dunham (@andrew-d)" short_description: Package manager for Homebrew description: - Manages Homebrew packages @@ -31,7 +33,8 @@ options: name: description: - name of package to install/remove - required: true + required: false + default: None state: description: - state of the package @@ -48,7 +51,7 @@ options: description: - upgrade all homebrew packages required: false - default: no + default: "no" choices: [ "yes", "no" ] install_options: description: @@ -113,6 +116,7 @@ class Homebrew(object): VALID_PACKAGE_CHARS = r''' \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) . # dots + / # slash (for taps) \+ # plusses - # dashes ''' @@ -831,5 +835,8 @@ def main(): module.exit_json(changed=changed, msg=message) # this is magic, see lib/ansible/module_common.py -#<> -main() +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() + diff --git a/packaging/os/homebrew_cask.py b/packaging/os/homebrew_cask.py index dede8d4bb36..e1b721a97b4 100644 --- a/packaging/os/homebrew_cask.py +++ b/packaging/os/homebrew_cask.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: homebrew_cask -author: Daniel Jaouen +author: "Daniel Jaouen (@danieljaouen)" short_description: Install/uninstall homebrew casks. description: - Manages Homebrew casks. 
@@ -32,7 +32,7 @@ options: state: description: - state of the cask - choices: [ 'installed', 'uninstalled' ] + choices: [ 'present', 'absent' ] required: false default: present ''' @@ -509,5 +509,8 @@ def main(): module.exit_json(changed=changed, msg=message) # this is magic, see lib/ansible/module_common.py -#<> -main() +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() + diff --git a/packaging/os/homebrew_tap.py b/packaging/os/homebrew_tap.py index a79ba076a8a..c6511f0c7b2 100644 --- a/packaging/os/homebrew_tap.py +++ b/packaging/os/homebrew_tap.py @@ -24,7 +24,7 @@ import re DOCUMENTATION = ''' --- module: homebrew_tap -author: Daniel Jaouen +author: "Daniel Jaouen (@danieljaouen)" short_description: Tap a Homebrew repository. description: - Tap external Homebrew repositories. @@ -52,7 +52,7 @@ homebrew_tap: tap=homebrew/dupes,homebrew/science state=present def a_valid_tap(tap): '''Returns True if the tap is valid.''' - regex = re.compile(r'^(\S+)/(homebrew-)?(\w+)$') + regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$') return regex.match(tap) @@ -211,5 +211,7 @@ def main(): module.exit_json(changed=changed, msg=msg) # this is magic, see lib/ansible/module_common.py -#<> -main() +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/packaging/os/layman.py b/packaging/os/layman.py index 57c03528c9e..c9d6b8ed333 100644 --- a/packaging/os/layman.py +++ b/packaging/os/layman.py @@ -25,12 +25,15 @@ from urllib2 import Request, urlopen, URLError DOCUMENTATION = ''' --- module: layman -author: Jakub Jirutka +author: "Jakub Jirutka (@jirutka)" version_added: "1.6" short_description: Manage Gentoo overlays description: - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. Please note that Layman must be installed on a managed node prior using this module. 
+requirements: + - "python >= 2.6" + - layman python module options: name: description: diff --git a/packaging/os/macports.py b/packaging/os/macports.py index ae7010b1cbd..ca3a0f97426 100644 --- a/packaging/os/macports.py +++ b/packaging/os/macports.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: macports -author: Jimmy Tang +author: "Jimmy Tang (@jcftang)" short_description: Package manager for MacPorts description: - Manages MacPorts packages diff --git a/packaging/os/openbsd_pkg.py b/packaging/os/openbsd_pkg.py index 14b4ff46024..1f331261d98 100644 --- a/packaging/os/openbsd_pkg.py +++ b/packaging/os/openbsd_pkg.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2013, Patrik Lundin +# (c) 2013, Patrik Lundin # # This file is part of Ansible # @@ -25,7 +25,7 @@ import syslog DOCUMENTATION = ''' --- module: openbsd_pkg -author: Patrik Lundin +author: "Patrik Lundin (@eest)" version_added: "1.1" short_description: Manage packages on OpenBSD. description: diff --git a/packaging/os/opkg.py b/packaging/os/opkg.py index 95afd6fd8bd..5b75ad1a260 100644 --- a/packaging/os/opkg.py +++ b/packaging/os/opkg.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: opkg -author: Patrick Pelletier +author: "Patrick Pelletier (@skinp)" short_description: Package manager for OpenWrt description: - Manages OpenWrt packages diff --git a/packaging/os/pacman.py b/packaging/os/pacman.py index a91f8e3054d..102865bc443 100644 --- a/packaging/os/pacman.py +++ b/packaging/os/pacman.py @@ -27,7 +27,9 @@ description: - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. 
version_added: "1.0" -author: Afterburn +author: + - "'Aaron Bull Schaefer (@elasticdog)' " + - "Afterburn" notes: [] requirements: [] options: @@ -76,7 +78,7 @@ EXAMPLES = ''' # Recursively remove package baz - pacman: name=baz state=absent recurse=yes -# Run the equivalent of "pacman -Syy" as a separate step +# Run the equivalent of "pacman -Sy" as a separate step - pacman: update_cache=yes ''' @@ -122,7 +124,7 @@ def query_package(module, name, state="present"): def update_package_db(module): - cmd = "pacman -Syy" + cmd = "pacman -Sy" rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 0: diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index eea860e7be2..837eefd243e 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: pkg5 -author: Peter Oliver +author: "Peter Oliver (@mavit)" short_description: Manages packages with the Solaris 11 Image Packaging System version_added: 1.9 description: @@ -39,6 +39,13 @@ options: required: false default: present choices: [ present, latest, absent ] + accept_licenses: + description: + - Accept any licences. 
+ required: false + default: false + choices: [ true, false ] + aliases: [ accept_licences, accept ] ''' EXAMPLES = ''' # Install Vim: @@ -70,6 +77,11 @@ def main(): 'removed', ] ), + accept_licenses=dict( + choices=BOOLEANS, + default=False, + aliases=['accept_licences', 'accept'], + ), ) ) @@ -89,14 +101,14 @@ def main(): packages.append(fragment) if params['state'] in ['present', 'installed']: - ensure(module, 'present', packages) + ensure(module, 'present', packages, params) elif params['state'] in ['latest']: - ensure(module, 'latest', packages) + ensure(module, 'latest', packages, params) elif params['state'] in ['absent', 'uninstalled', 'removed']: - ensure(module, 'absent', packages) + ensure(module, 'absent', packages, params) -def ensure(module, state, packages): +def ensure(module, state, packages, params): response = { 'results': [], 'msg': '', @@ -116,10 +128,21 @@ def ensure(module, state, packages): }, } + if params['accept_licenses']: + accept_licenses = ['--accept'] + else: + accept_licenses = [] + to_modify = filter(behaviour[state]['filter'], packages) if to_modify: rc, out, err = module.run_command( - ['pkg', behaviour[state]['subcommand'], '-q', '--'] + to_modify + [ + 'pkg', behaviour[state]['subcommand'] + ] + + accept_licenses + + [ + '-q', '--' + ] + to_modify ) response['rc'] = rc response['results'].append(out) @@ -133,12 +156,12 @@ def ensure(module, state, packages): def is_installed(module, package): rc, out, err = module.run_command(['pkg', 'list', '--', package]) - return True if rc == 0 else False + return not bool(int(rc)) def is_latest(module, package): rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package]) - return True if rc == 1 else False + return bool(int(rc)) from ansible.module_utils.basic import * diff --git a/packaging/os/pkg5_publisher.py b/packaging/os/pkg5_publisher.py index 63c62059203..3881f5dd0b8 100644 --- a/packaging/os/pkg5_publisher.py +++ b/packaging/os/pkg5_publisher.py @@ -19,7 +19,7 @@ 
DOCUMENTATION = ''' --- module: pkg5_publisher -author: Peter Oliver +author: "Peter Oliver (@mavit)" short_description: Manages Solaris 11 Image Packaging System publishers version_added: 1.9 description: @@ -122,10 +122,15 @@ def set_publisher(module, params): args.append('--remove-mirror=*') args.extend(['--add-mirror=' + u for u in params['mirror']]) - if params['sticky'] != None: - args.append('--sticky' if params['sticky'] else '--non-sticky') - if params['enabled'] != None: - args.append('--enable' if params['enabled'] else '--disable') + if params['sticky'] != None and params['sticky']: + args.append('--sticky') + elif params['sticky'] != None: + args.append('--non-sticky') + + if params['enabled'] != None and params['enabled']: + args.append('--enable') + elif params['enabled'] != None: + args.append('--disable') rc, out, err = module.run_command( ["pkg", "set-publisher"] + args + [name], diff --git a/packaging/os/pkgin.py b/packaging/os/pkgin.py index f4c203e56e0..e600026409b 100644 --- a/packaging/os/pkgin.py +++ b/packaging/os/pkgin.py @@ -30,7 +30,9 @@ description: - "The standard package manager for SmartOS, but also usable on NetBSD or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))" version_added: "1.0" -author: Shaun Zinck, Larry Gilbert +author: + - "Larry Gilbert (L2G)" + - "Shaun Zinck (@szinck)" notes: - "Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package will be silently removed as @@ -76,9 +78,19 @@ def query_package(module, pkgin_path, name): * False - not installed or not found """ + # test whether '-p' (parsable) flag is supported. + rc, out, err = module.run_command("%s -p -v" % pkgin_path) + + if rc == 0: + pflag = '-p' + splitchar = ';' + else: + pflag = '' + splitchar = ' ' + # Use "pkgin search" to find the package. The regular expression will # only match on the complete name. 
- rc, out, err = module.run_command("%s search \"^%s$\"" % (pkgin_path, name)) + rc, out, err = module.run_command("%s %s search \"^%s$\"" % (pkgin_path, pflag, name)) # rc will not be 0 unless the search was a success if rc == 0: @@ -93,7 +105,7 @@ def query_package(module, pkgin_path, name): # '<' - installed but out of date # '=' - installed and up to date # '>' - installed but newer than the repository version - pkgname_with_version, raw_state = out.split(' ')[0:2] + pkgname_with_version, raw_state = out.split(splitchar)[0:2] # Strip version # (results in sth like 'gcc47-libs') diff --git a/packaging/os/pkgng.py b/packaging/os/pkgng.py index 1aa8e0c737f..c0819dbe5b8 100644 --- a/packaging/os/pkgng.py +++ b/packaging/os/pkgng.py @@ -63,7 +63,7 @@ options: for newer pkgng versions, specify a the name of a repository configured in /usr/local/etc/pkg/repos required: false -author: bleader +author: "bleader (@bleader)" notes: - When using pkgsite, be careful that already in cache packages won't be downloaded again. ''' @@ -252,9 +252,8 @@ def annotate_packages(module, pkgng_path, packages, annotation): for package in packages: for _annotation in annotations: - annotate_c += ( 1 if operation[_annotation['operation']]( - module, pkgng_path, package, - _annotation['tag'], _annotation['value']) else 0 ) + if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value']): + annotate_c += 1 if annotate_c > 0: return (True, "added %s annotations." % annotate_c) diff --git a/packaging/os/pkgutil.py b/packaging/os/pkgutil.py index 635617b4efe..3a4720630cf 100644 --- a/packaging/os/pkgutil.py +++ b/packaging/os/pkgutil.py @@ -32,7 +32,7 @@ description: - Pkgutil is an advanced packaging system, which resolves dependency on installation. It is designed for CSW packages. 
version_added: "1.3" -author: Alexander Winkler +author: "Alexander Winkler (@dermute)" options: name: description: diff --git a/packaging/os/portage.py b/packaging/os/portage.py index ab96cb22e60..7be55db3ca8 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -147,7 +147,9 @@ options: choices: [ "yes" ] requirements: [ gentoolkit ] -author: Yap Sok Ann, Andrew Udvare +author: + - "Yap Sok Ann (@sayap)" + - "Andrew Udvare" notes: [] ''' @@ -231,7 +233,7 @@ def sync_repositories(module, webrsync=False): webrsync_path = module.get_bin_path('emerge-webrsync', required=True) cmd = '%s --quiet' % webrsync_path else: - cmd = '%s --sync --quiet' % module.emerge_path + cmd = '%s --sync --quiet --ask=n' % module.emerge_path rc, out, err = module.run_command(cmd) if rc != 0: @@ -252,6 +254,8 @@ def emerge_packages(module, packages): break else: module.exit_json(changed=False, msg='Packages already present.') + if module.check_mode: + module.exit_json(changed=True, msg='Packages would be installed.') args = [] emerge_flags = { @@ -267,14 +271,14 @@ def emerge_packages(module, packages): 'verbose': '--verbose', 'getbinpkg': '--getbinpkg', 'usepkgonly': '--usepkgonly', + 'usepkg': '--usepkg', } for flag, arg in emerge_flags.iteritems(): if p[flag]: args.append(arg) - # usepkgonly implies getbinpkg - if p['usepkgonly'] and not p['getbinpkg']: - args.append('--getbinpkg') + if p['usepkg'] and p['usepkgonly']: + module.fail_json(msg='Use only one of usepkg, usepkgonly') cmd, (rc, out, err) = run_emerge(module, packages, *args) if rc != 0: @@ -296,13 +300,18 @@ def emerge_packages(module, packages): changed = True for line in out.splitlines(): if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line): + msg = 'Packages installed.' + break + elif module.check_mode and re.match(r'\[(binary|ebuild)', line): + msg = 'Packages would be installed.' break else: changed = False + msg = 'No packages installed.' 
module.exit_json( changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages installed.', + msg=msg, ) @@ -406,6 +415,7 @@ def main(): sync=dict(default=None, choices=['yes', 'web']), getbinpkg=dict(default=None, choices=['yes']), usepkgonly=dict(default=None, choices=['yes']), + usepkg=dict(default=None, choices=['yes']), ), required_one_of=[['package', 'sync', 'depclean']], mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']], @@ -422,7 +432,9 @@ def main(): if not p['package']: module.exit_json(msg='Sync successfully finished.') - packages = p['package'].split(',') if p['package'] else [] + packages = [] + if p['package']: + packages.extend(p['package'].split(',')) if p['depclean']: if packages and p['state'] not in portage_absent_states: diff --git a/packaging/os/portinstall.py b/packaging/os/portinstall.py index 068f413af72..b4e3044167e 100644 --- a/packaging/os/portinstall.py +++ b/packaging/os/portinstall.py @@ -43,7 +43,7 @@ options: choices: [ 'yes', 'no' ] required: false default: yes -author: berenddeboer +author: "berenddeboer (@berenddeboer)" ''' EXAMPLES = ''' diff --git a/packaging/os/svr4pkg.py b/packaging/os/svr4pkg.py index e95d4d8643f..5d8bac17eaa 100644 --- a/packaging/os/svr4pkg.py +++ b/packaging/os/svr4pkg.py @@ -30,7 +30,7 @@ description: - Note that this is a very basic packaging system. It will not enforce dependencies on install or remove. version_added: "0.9" -author: Boyd Adamson +author: "Boyd Adamson (@brontitall)" options: name: description: @@ -209,15 +209,25 @@ def main(): (rc, out, err) = package_uninstall(module, name, src, category) out = out[:75] - # Success, Warning, Interruption, Reboot all, Reboot this return codes + # Returncodes as per pkgadd(1m) + # 0 Successful completion + # 1 Fatal error. + # 2 Warning. + # 3 Interruption. + # 4 Administration. + # 5 Administration. Interaction is required. Do not use pkgadd -n. + # 10 Reboot after installation of all packages. 
+ # 20 Reboot after installation of this package. + # 99 (observed) pkgadd: ERROR: could not process datastream from if rc in (0, 2, 3, 10, 20): result['changed'] = True # no install nor uninstall, or failed else: result['changed'] = False - # Fatal error, Administration, Administration Interaction return codes - if rc in (1, 4 , 5): + # Only return failed=False when the returncode is known to be good as there may be more + # undocumented failure return codes + if rc not in (0, 2, 10, 20): result['failed'] = True else: result['failed'] = False diff --git a/packaging/os/swdepot.py b/packaging/os/swdepot.py index b41a860531f..157fa212c17 100644 --- a/packaging/os/swdepot.py +++ b/packaging/os/swdepot.py @@ -29,7 +29,7 @@ description: - Will install, upgrade and remove packages with swdepot package manager (HP-UX) version_added: "1.4" notes: [] -author: Raul Melo +author: "Raul Melo (@melodous)" options: name: description: diff --git a/packaging/os/urpmi.py b/packaging/os/urpmi.py index 320d17bfc00..7b7aaefbd1d 100644 --- a/packaging/os/urpmi.py +++ b/packaging/os/urpmi.py @@ -57,7 +57,7 @@ options: required: false default: yes choices: [ "yes", "no" ] -author: Philippe Makowski +author: "Philippe Makowski (@pmakowski)" notes: [] ''' diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index 196a7e2782e..f3205051fdf 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -31,7 +31,7 @@ import re DOCUMENTATION = ''' --- module: zypper -author: Patrick Callahan +author: "Patrick Callahan (@dirtyharrycallahan)" version_added: "1.2" short_description: Manage packages on SUSE and openSUSE description: @@ -50,6 +50,13 @@ options: required: false choices: [ present, latest, absent ] default: "present" + type: + description: + - The type of package to be operated on. 
+ required: false + choices: [ package, patch, pattern, product, srcpackage ] + default: "package" + version_added: "2.0" disable_gpg_check: description: - Whether to disable to GPG signature checking of the package @@ -95,25 +102,31 @@ def zypper_version(module): return rc, stderr # Function used for getting versions of currently installed packages. -def get_current_version(m, name): +def get_current_version(m, packages): cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n'] - cmd.extend(name) - (rc, stdout, stderr) = m.run_command(cmd) + cmd.extend(packages) + + rc, stdout, stderr = m.run_command(cmd, check_rc=False) current_version = {} rpmoutput_re = re.compile('^(\S+) (\S+)$') - for stdoutline, package in zip(stdout.splitlines(), name): - m = rpmoutput_re.match(stdoutline) - if m == None: + + for stdoutline in stdout.splitlines(): + match = rpmoutput_re.match(stdoutline) + if match == None: return None - rpmpackage = m.group(1) - rpmversion = m.group(2) - if package != rpmpackage: + package = match.group(1) + version = match.group(2) + current_version[package] = version + + for package in packages: + if package not in current_version: + print package + ' was not returned by rpm \n' return None - current_version[package] = rpmversion return current_version + # Function used to find out if a package is currently installed. 
def get_package_state(m, packages): cmd = ['/bin/rpm', '--query', '--qf', 'package %{NAME} is installed\n'] @@ -123,24 +136,26 @@ def get_package_state(m, packages): installed_state = {} rpmoutput_re = re.compile('^package (\S+) (.*)$') - for stdoutline, name in zip(stdout.splitlines(), packages): - m = rpmoutput_re.match(stdoutline) - if m == None: - return None - package = m.group(1) - result = m.group(2) - if not name.startswith(package): - print name + ':' + package + ':' + stdoutline + '\n' + for stdoutline in stdout.splitlines(): + match = rpmoutput_re.match(stdoutline) + if match == None: return None + package = match.group(1) + result = match.group(2) if result == 'is installed': - installed_state[name] = True + installed_state[package] = True else: - installed_state[name] = False + installed_state[package] = False + + for package in packages: + if package not in installed_state: + print package + ' was not returned by rpm \n' + return None return installed_state # Function used to make sure a package is present. -def package_present(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper): +def package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper): packages = [] for package in name: if installed_state[package] is False: @@ -150,7 +165,7 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recomme # add global options before zypper command if disable_gpg_check: cmd.append('--no-gpg-checks') - cmd.extend(['install', '--auto-agree-with-licenses']) + cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type]) # add install parameter if disable_recommends and not old_zypper: cmd.append('--no-recommends') @@ -170,10 +185,10 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recomme return (rc, stdout, stderr, changed) # Function used to make sure a package is the latest available version. 
-def package_latest(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper): +def package_latest(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper): # first of all, make sure all the packages are installed - (rc, stdout, stderr, changed) = package_present(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper) + (rc, stdout, stderr, changed) = package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper) # if we've already made a change, we don't have to check whether a version changed if not changed: @@ -185,9 +200,9 @@ def package_latest(m, name, installed_state, disable_gpg_check, disable_recommen cmd.append('--no-gpg-checks') if old_zypper: - cmd.extend(['install', '--auto-agree-with-licenses']) + cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type]) else: - cmd.extend(['update', '--auto-agree-with-licenses']) + cmd.extend(['update', '--auto-agree-with-licenses', '-t', package_type]) cmd.extend(name) rc, stdout, stderr = m.run_command(cmd, check_rc=False) @@ -201,13 +216,13 @@ def package_latest(m, name, installed_state, disable_gpg_check, disable_recommen return (rc, stdout, stderr, changed) # Function used to make sure a package is not installed. 
-def package_absent(m, name, installed_state, old_zypper): +def package_absent(m, name, installed_state, package_type, old_zypper): packages = [] for package in name: if installed_state[package] is True: packages.append(package) if len(packages) != 0: - cmd = ['/usr/bin/zypper', '--non-interactive', 'remove'] + cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', '-t', package_type] cmd.extend(packages) rc, stdout, stderr = m.run_command(cmd) @@ -231,6 +246,7 @@ def main(): argument_spec = dict( name = dict(required=True, aliases=['pkg'], type='list'), state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), + type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage']), disable_gpg_check = dict(required=False, default='no', type='bool'), disable_recommends = dict(required=False, default='yes', type='bool'), ), @@ -242,6 +258,7 @@ def main(): name = params['name'] state = params['state'] + type_ = params['type'] disable_gpg_check = params['disable_gpg_check'] disable_recommends = params['disable_recommends'] @@ -264,11 +281,11 @@ def main(): # Perform requested action if state in ['installed', 'present']: - (rc, stdout, stderr, changed) = package_present(module, name, installed_state, disable_gpg_check, disable_recommends, old_zypper) + (rc, stdout, stderr, changed) = package_present(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper) elif state in ['absent', 'removed']: - (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, old_zypper) + (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, type_, old_zypper) elif state == 'latest': - (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, disable_gpg_check, disable_recommends, old_zypper) + (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, type_, disable_gpg_check, 
disable_recommends, old_zypper) if rc != 0: if stderr: diff --git a/packaging/os/zypper_repository.py b/packaging/os/zypper_repository.py index f208305fe60..54e20429638 100644 --- a/packaging/os/zypper_repository.py +++ b/packaging/os/zypper_repository.py @@ -23,7 +23,7 @@ DOCUMENTATION = ''' --- module: zypper_repository -author: Matthias Vogelgesang +author: "Matthias Vogelgesang (@matze)" version_added: "1.4" short_description: Add and remove Zypper repositories description: diff --git a/source_control/bzr.py b/source_control/bzr.py index 0d25a026f7a..0fc6ac28584 100644 --- a/source_control/bzr.py +++ b/source_control/bzr.py @@ -22,7 +22,7 @@ DOCUMENTATION = u''' --- module: bzr -author: André Paramés +author: "André Paramés (@andreparames)" version_added: "1.1" short_description: Deploy software (or files) from bzr branches description: diff --git a/source_control/github_hooks.py b/source_control/github_hooks.py index 7aaff98f413..d75fcb1573d 100644 --- a/source_control/github_hooks.py +++ b/source_control/github_hooks.py @@ -64,7 +64,7 @@ options: default: 'json' choices: ['json', 'form'] -author: Phillip Gentry, CX Inc +author: "Phillip Gentry, CX Inc (@pcgentry)" ''' EXAMPLES = ''' diff --git a/system/alternatives.py b/system/alternatives.py old mode 100755 new mode 100644 index 871a494e87d..90e2237f86c --- a/system/alternatives.py +++ b/system/alternatives.py @@ -4,6 +4,7 @@ """ Ansible module to manage symbolic link alternatives. (c) 2014, Gabe Mulley +(c) 2015, David Wittman This file is part of Ansible @@ -26,9 +27,12 @@ DOCUMENTATION = ''' module: alternatives short_description: Manages alternative programs for common commands description: - - Manages symbolic links using the 'update-alternatives' tool provided on debian-like systems. + - Manages symbolic links using the 'update-alternatives' tool - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). 
version_added: "1.6" +author: + - "David Wittman (@DavidWittman)" + - "Gabe Mulley (@mulby)" options: name: description: @@ -41,6 +45,7 @@ options: link: description: - The path to the symbolic link that should point to the real executable. + - This option is required on RHEL-based distributions required: false requirements: [ update-alternatives ] ''' @@ -55,12 +60,14 @@ EXAMPLES = ''' DEFAULT_LINK_PRIORITY = 50 +import re + def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - path = dict(required=True), + path = dict(required=True), link = dict(required=False), ), supports_check_mode=True, @@ -71,78 +78,55 @@ def main(): path = params['path'] link = params['link'] - UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True) + UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True) current_path = None all_alternatives = [] - os_family = None - (rc, query_output, query_error) = module.run_command( - [UPDATE_ALTERNATIVES, '--query', name] + # Run `update-alternatives --display ` to find existing alternatives + (rc, display_output, _) = module.run_command( + ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name] ) - # Gather the current setting and all alternatives from the query output. 
- # Query output should look something like this on Debian systems: - - # Name: java - # Link: /usr/bin/java - # Slaves: - # java.1.gz /usr/share/man/man1/java.1.gz - # Status: manual - # Best: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - # Value: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java - - # Alternative: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java - # Priority: 1061 - # Slaves: - # java.1.gz /usr/lib/jvm/java-6-openjdk-amd64/jre/man/man1/java.1.gz - - # Alternative: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - # Priority: 1071 - # Slaves: - # java.1.gz /usr/lib/jvm/java-7-openjdk-amd64/jre/man/man1/java.1.gz - if rc == 0: - os_family = "Debian" - for line in query_output.splitlines(): - split_line = line.split(':') - if len(split_line) == 2: - key = split_line[0] - value = split_line[1].strip() - if key == 'Value': - current_path = value - elif key == 'Alternative': - all_alternatives.append(value) - elif key == 'Link' and not link: - link = value - elif rc == 2: - os_family = "RedHat" - # This is the version of update-alternatives that is shipped with - # chkconfig on RedHat-based systems. Try again with the right options. 
- (rc, query_output, query_error) = module.run_command( - [UPDATE_ALTERNATIVES, '--list'] - ) - for line in query_output.splitlines(): - line_name, line_mode, line_path = line.strip().split("\t") - if line_name != name: - continue - current_path = line_path - break + # Alternatives already exist for this link group + # Parse the output to determine the current path of the symlink and + # available alternatives + current_path_regex = re.compile(r'^\s*link currently points to (.*)$', + re.MULTILINE) + alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE) + + current_path = current_path_regex.search(display_output).group(1) + all_alternatives = alternative_regex.findall(display_output) + + if not link: + # Read the current symlink target from `update-alternatives --query` + # in case we need to install the new alternative before setting it. + # + # This is only compatible on Debian-based systems, as the other + # alternatives don't have --query available + rc, query_output, _ = module.run_command( + ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name] + ) + if rc == 0: + for line in query_output.splitlines(): + if line.startswith('Link:'): + link = line.split()[1] + break if current_path != path: if module.check_mode: module.exit_json(changed=True, current_path=current_path) try: # install the requested path if necessary - # (unsupported on the RedHat version) - if path not in all_alternatives and os_family == "Debian": - if link: - module.run_command( - [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)], - check_rc=True - ) - else: - module.fail_json("Needed to install the alternative, but unable to do so, as we are missking the link") + if path not in all_alternatives: + if not link: + module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link") + + module.run_command( + [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)], + check_rc=True + ) 
# select the requested path module.run_command( diff --git a/system/at.py b/system/at.py index 770148991f1..0ce9ff2c7d4 100644 --- a/system/at.py +++ b/system/at.py @@ -59,7 +59,7 @@ options: default: false requirements: - at -author: Richard Isaacson +author: "Richard Isaacson (@risaacson)" ''' EXAMPLES = ''' diff --git a/system/capabilities.py b/system/capabilities.py index f4a9f62c0d0..ce8ffcfa632 100644 --- a/system/capabilities.py +++ b/system/capabilities.py @@ -50,7 +50,7 @@ notes: and flags to compare, so you will want to ensure that your capabilities argument matches the final capabilities. requirements: [] -author: Nate Coraor +author: "Nate Coraor (@natefoo)" ''' EXAMPLES = ''' diff --git a/system/cronvar.py b/system/cronvar.py new file mode 100644 index 00000000000..fe337752d59 --- /dev/null +++ b/system/cronvar.py @@ -0,0 +1,430 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Cronvar Plugin: The goal of this plugin is to provide an indempotent +# method for set cron variable values. It should play well with the +# existing cron module as well as allow for manually added variables. +# Each variable entered will be preceded with a comment describing the +# variable so that it can be found later. 
This is required to be +# present in order for this plugin to find/modify the variable +# +# This module is based on the crontab module. +# + +DOCUMENTATION = """ +--- +module: cronvar +short_description: Manage variables in crontabs +description: + - Use this module to manage crontab variables. This module allows + you to create, update, or delete cron variable definitions. +version_added: "2.0" +options: + name: + description: + - Name of the crontab variable. + default: null + required: true + value: + description: + - The value to set this variable to. Required if state=present. + required: false + default: null + insertafter: + required: false + default: null + description: + - Used with C(state=present). If specified, the variable will be inserted + after the variable specified. + insertbefore: + required: false + default: null + description: + - Used with C(state=present). If specified, the variable will be inserted + just before the variable specified. + state: + description: + - Whether to ensure that the variable is present or absent. + required: false + default: present + choices: [ "present", "absent" ] + user: + description: + - The specific user whose crontab should be modified. + required: false + default: root + cron_file: + description: + - If specified, uses this file in cron.d instead of an individual user's crontab. + required: false + default: null + backup: + description: + - If set, create a backup of the crontab before it is modified. + The location of the backup is returned in the C(backup) variable by this module. + required: false + default: false +requirements: + - cron +author: "Doug Luce (@dougluce)" +""" + +EXAMPLES = ''' +# Ensure a variable exists. +# Creates an entry like "EMAIL=doug@ansibmod.con.com" +- cronvar: name="EMAIL" value="doug@ansibmod.con.com" + +# Make sure a variable is gone. 
This will remove any variable named +# "LEGACY" +- cronvar: name="LEGACY" state=absent + +# Adds a variable to a file under /etc/cron.d +- cronvar: name="LOGFILE" value="/var/log/yum-autoupdate.log" + user="root" cron_file=ansible_yum-autoupdate +''' + +import os +import re +import tempfile +import platform +import pipes +import shlex + +CRONCMD = "/usr/bin/crontab" + +class CronVarError(Exception): + pass + +class CronVar(object): + """ + CronVar object to write variables to crontabs. + + user - the user of the crontab (defaults to root) + cron_file - a cron file under /etc/cron.d + """ + def __init__(self, module, user=None, cron_file=None): + self.module = module + self.user = user + if self.user is None: + self.user = 'root' + self.lines = None + self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"', )) + # select whether we dump additional debug info through syslog + self.syslogging = False + + if cron_file: + self.cron_file = '/etc/cron.d/%s' % cron_file + else: + self.cron_file = None + + self.read() + + def read(self): + # Read in the crontab from the system + self.lines = [] + if self.cron_file: + # read the cronfile + try: + f = open(self.cron_file, 'r') + self.lines = f.read().splitlines() + f.close() + except IOError, e: + # cron file does not exist + return + except: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + else: + # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME + (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) + + if rc != 0 and rc != 1: # 1 can mean that there are no jobs. 
+ raise CronVarError("Unable to read crontab") + + lines = out.splitlines() + count = 0 + for l in lines: + if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and + not re.match( r'# \(/tmp/.*installed on.*\)', l) and + not re.match( r'# \(.*version.*\)', l)): + self.lines.append(l) + count += 1 + + def log_message(self, message): + if self.syslogging: + syslog.syslog(syslog.LOG_NOTICE, 'ansible: "%s"' % message) + + def write(self, backup_file=None): + """ + Write the crontab to the system. Saves all information. + """ + if backup_file: + fileh = open(backup_file, 'w') + elif self.cron_file: + fileh = open(self.cron_file, 'w') + else: + filed, path = tempfile.mkstemp(prefix='crontab') + fileh = os.fdopen(filed, 'w') + + fileh.write(self.render()) + fileh.close() + + # return if making a backup + if backup_file: + return + + # Add the entire crontab back to the user crontab + if not self.cron_file: + # quoting shell args for now but really this should be two non-shell calls. 
FIXME + (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) + os.unlink(path) + + if rc != 0: + self.module.fail_json(msg=err) + + def remove_variable_file(self): + try: + os.unlink(self.cron_file) + return True + except OSError, e: + # cron file does not exist + return False + except: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + + def parse_for_var(self, line): + lexer = shlex.shlex(line) + lexer.wordchars = self.wordchars + varname = lexer.get_token() + is_env_var = lexer.get_token() == '=' + value = ''.join(lexer) + if is_env_var: + return (varname, value) + raise CronVarError("Not a variable.") + + def find_variable(self, name): + comment = None + for l in self.lines: + try: + (varname, value) = self.parse_for_var(l) + if varname == name: + return value + except CronVarError: + pass + return None + + def get_var_names(self): + var_names = [] + for l in self.lines: + try: + (var_name, _) = self.parse_for_var(l) + var_names.append(var_name) + except CronVarError: + pass + return var_names + + def add_variable(self, name, value, insertbefore, insertafter): + if insertbefore is None and insertafter is None: + # Add the variable to the top of the file. + self.lines.insert(0, "%s=%s" % (name, value)) + else: + newlines = [] + for l in self.lines: + try: + (varname, _) = self.parse_for_var(l) # Throws if not a var line + if varname == insertbefore: + newlines.append("%s=%s" % (name, value)) + newlines.append(l) + elif varname == insertafter: + newlines.append(l) + newlines.append("%s=%s" % (name, value)) + else: + raise CronVarError # Append. 
+ except CronVarError: + newlines.append(l) + + self.lines = newlines + + def remove_variable(self, name): + self.update_variable(name, None, remove=True) + + def update_variable(self, name, value, remove=False): + newlines = [] + for l in self.lines: + try: + (varname, _) = self.parse_for_var(l) # Throws if not a var line + if varname != name: + raise CronVarError # Append. + if not remove: + newlines.append("%s=%s" % (name, value)) + except CronVarError: + newlines.append(l) + + self.lines = newlines + + def render(self): + """ + Render a proper crontab + """ + result = '\n'.join(self.lines) + if result and result[-1] not in ['\n', '\r']: + result += '\n' + return result + + def _read_user_execute(self): + """ + Returns the command line for reading a crontab + """ + user = '' + + if self.user: + if platform.system() == 'SunOS': + return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD)) + elif platform.system() == 'AIX': + return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user)) + elif platform.system() == 'HP-UX': + return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user)) + else: + user = '-u %s' % pipes.quote(self.user) + return "%s %s %s" % (CRONCMD , user, '-l') + + def _write_execute(self, path): + """ + Return the command line for writing a crontab + """ + user = '' + if self.user: + if platform.system() in ['SunOS', 'HP-UX', 'AIX']: + return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path)) + else: + user = '-u %s' % pipes.quote(self.user) + return "%s %s %s" % (CRONCMD , user, pipes.quote(path)) + +#================================================== + +def main(): + # The following example playbooks: + # + # - cronvar: name="SHELL" value="/bin/bash" + # + # - name: Set the email + # cronvar: name="EMAILTO" value="doug@ansibmod.con.com" + # + # - name: Get rid of the old new host variable + # cronvar: name="NEW_HOST" state=absent + # + # Would 
produce: + # SHELL = /bin/bash + # EMAILTO = doug@ansibmod.con.com + + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + value=dict(required=False), + user=dict(required=False), + cron_file=dict(required=False), + insertafter=dict(default=None), + insertbefore=dict(default=None), + state=dict(default='present', choices=['present', 'absent']), + backup=dict(default=False, type='bool'), + ), + mutually_exclusive=[['insertbefore', 'insertafter']], + supports_check_mode=False, + ) + + name = module.params['name'] + value = module.params['value'] + user = module.params['user'] + cron_file = module.params['cron_file'] + insertafter = module.params['insertafter'] + insertbefore = module.params['insertbefore'] + state = module.params['state'] + backup = module.params['backup'] + ensure_present = state == 'present' + + changed = False + res_args = dict() + + # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option. + os.umask(022) + cronvar = CronVar(module, user, cron_file) + + if cronvar.syslogging: + syslog.openlog('ansible-%s' % os.path.basename(__file__)) + syslog.syslog(syslog.LOG_NOTICE, 'cronvar instantiated - name: "%s"' % name) + + # --- user input validation --- + + if name is None and ensure_present: + module.fail_json(msg="You must specify 'name' to insert a new cron variable") + + if value is None and ensure_present: + module.fail_json(msg="You must specify 'value' to insert a new cron variable") + + if name is None and not ensure_present: + module.fail_json(msg="You must specify 'name' to remove a cron variable") + + # if requested make a backup before making a change + if backup: + (_, backup_file) = tempfile.mkstemp(prefix='cronvar') + cronvar.write(backup_file) + + if cronvar.cron_file and not name and not ensure_present: + changed = cronvar.remove_variable_file() + module.exit_json(changed=changed, cron_file=cron_file, state=state) + + old_value = cronvar.find_variable(name) + +
if ensure_present: + if old_value is None: + cronvar.add_variable(name, value, insertbefore, insertafter) + changed = True + elif old_value != value: + cronvar.update_variable(name, value) + changed = True + else: + if old_value is not None: + cronvar.remove_variable(name) + changed = True + + res_args = { + "vars": cronvar.get_var_names(), + "changed": changed + } + + if changed: + cronvar.write() + + # retain the backup only if crontab or cron file have changed + if backup: + if changed: + res_args['backup_file'] = backup_file + else: + os.unlink(backup_file) + + if cron_file: + res_args['cron_file'] = cron_file + + module.exit_json(**res_args) + + # --- should never get here + module.exit_json(msg="Unable to execute cronvar task.") + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/system/crypttab.py b/system/crypttab.py index 5142a6cf327..44d9f859791 100644 --- a/system/crypttab.py +++ b/system/crypttab.py @@ -69,7 +69,7 @@ options: notes: [] requirements: [] -author: Steve +author: "Steve (@groks)" ''' EXAMPLES = ''' @@ -103,7 +103,7 @@ def main(): state = module.params['state'] path = module.params['path'] - if backing_device is None and password is None and opts is None: + if state != 'absent' and backing_device is None and password is None and opts is None: module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'", **module.params) @@ -155,8 +155,11 @@ def main(): if changed and not module.check_mode: - with open(path, 'wb') as f: + try: + f = open(path, 'wb') f.write(str(crypttab)) + finally: + f.close() module.exit_json(changed=changed, msg=reason, **module.params) @@ -172,9 +175,12 @@ class Crypttab(object): os.makedirs(os.path.dirname(path)) open(path,'a').close() - with open(path, 'r') as f: + try: + f = open(path, 'r') for line in f.readlines(): self._lines.append(Line(line)) + finally: + f.close() def add(self, line): self._lines.append(line) @@ -242,10 +248,19 @@ class 
Line(object): def _split_line(self, line): fields = line.split() + try: + field2 = fields[2] + except IndexError: + field2 = None + try: + field3 = fields[3] + except IndexError: + field3 = None + return (fields[0], fields[1], - fields[2] if len(fields) >= 3 else None, - fields[3] if len(fields) >= 4 else None) + field2, + field3) def remove(self): self.line, self.name, self.backing_device = '', None, None @@ -260,7 +275,10 @@ class Line(object): if self.valid(): fields = [self.name, self.backing_device] if self.password is not None or self.opts: - fields.append(self.password if self.password is not None else 'none') + if self.password is not None: + fields.append(self.password) + else: + fields.append('none') if self.opts: fields.append(str(self.opts)) return ' '.join(fields) @@ -276,7 +294,10 @@ class Options(dict): if opts_string is not None: for opt in opts_string.split(','): kv = opt.split('=') - k, v = (kv[0], kv[1]) if len(kv) > 1 else (kv[0], None) + if len(kv) > 1: + k, v = (kv[0], kv[1]) + else: + k, v = (kv[0], None) self[k] = v def add(self, opts_string): @@ -324,8 +345,13 @@ class Options(dict): and sorted(self.items()) == sorted(obj.items())) def __str__(self): - return ','.join([k if v is None else '%s=%s' % (k, v) - for k, v in self.items()]) + ret = [] + for k, v in self.items(): + if v is None: + ret.append(k) + else: + ret.append('%s=%s' % (k, v)) + return ','.join(ret) # import module snippets from ansible.module_utils.basic import * diff --git a/system/debconf.py b/system/debconf.py index 0deaff25eb1..b249986a947 100644 --- a/system/debconf.py +++ b/system/debconf.py @@ -68,7 +68,7 @@ options: required: false default: False aliases: [] -author: Brian Coca +author: "Brian Coca (@bcoca)" ''' diff --git a/system/facter.py b/system/facter.py index a4912835447..6c09877fcbe 100644 --- a/system/facter.py +++ b/system/facter.py @@ -32,7 +32,9 @@ version_added: "0.2" options: {} notes: [] requirements: [ "facter", "ruby-json" ] -author: Michael DeHaan
+author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/system/filesystem.py b/system/filesystem.py index 0de5b75e38b..1e867f30270 100644 --- a/system/filesystem.py +++ b/system/filesystem.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -author: Alexander Bulimov +author: "Alexander Bulimov (@abulimov)" module: filesystem short_description: Makes file system on block device description: diff --git a/system/firewalld.py b/system/firewalld.py index dedc9260740..04dd4981584 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -23,24 +23,30 @@ DOCUMENTATION = ''' module: firewalld short_description: Manage arbitrary ports/services with firewalld description: - - This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules + - This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules. version_added: "1.4" options: service: description: - - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services" + - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services." required: false default: null port: description: - - "Name of a port to add/remove to/from firewalld must be in the form PORT/PROTOCOL" + - "Name of a port or port range to add/remove to/from firewalld. Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges." required: false default: null rich_rule: description: - - "Rich rule to add/remove to/from firewalld" + - "Rich rule to add/remove to/from firewalld." required: false default: null + source: + description: + - 'The source/network you would like to add/remove to/from firewalld' + required: false + default: null + version_added: "2.0" zone: description: - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. 
Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).' @@ -49,7 +55,7 @@ options: choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block"] permanent: description: - - "Should this configuration be in the running firewalld configuration or persist across reboots" + - "Should this configuration be in the running firewalld configuration or persist across reboots." required: true immediate: description: @@ -59,29 +65,30 @@ options: version_added: "1.9" state: description: - - "Should this port accept(enabled) or reject(disabled) connections" + - "Should this port accept(enabled) or reject(disabled) connections." required: true timeout: description: - - "The amount of time the rule should be in effect for when non-permanent" + - "The amount of time the rule should be in effect for when non-permanent." required: false default: 0 notes: - - Not tested on any debian based system -requirements: [ firewalld >= 0.2.11 ] -author: Adam Miller + - Not tested on any Debian based system. 
+requirements: [ 'firewalld >= 0.2.11' ] +author: "Adam Miller (@maxamillion)" ''' EXAMPLES = ''' - firewalld: service=https permanent=true state=enabled - firewalld: port=8081/tcp permanent=true state=disabled +- firewalld: port=161-162/udp permanent=true state=enabled - firewalld: zone=dmz service=http permanent=true state=enabled - firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled +- firewalld: source='192.168.1.0/24' zone=internal state=enabled ''' import os import re -import sys try: import firewall.config @@ -89,14 +96,9 @@ try: from firewall.client import FirewallClient fw = FirewallClient() - if not fw.connected: - raise Exception('failed to connect to the firewalld daemon') + HAS_FIREWALLD = True except ImportError: - print "failed=True msg='firewalld required for this module'" - sys.exit(1) -except Exception, e: - print "failed=True msg='%s'" % str(e) - sys.exit(1) + HAS_FIREWALLD = False ################ # port handling @@ -132,7 +134,27 @@ def set_port_disabled_permanent(zone, port, protocol): fw_settings = fw_zone.getSettings() fw_settings.removePort(port, protocol) fw_zone.update(fw_settings) - + +#################### +# source handling +# +def get_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + if source in fw_settings.getSources(): + return True + else: + return False + +def add_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.addSource(source) + +def remove_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.removeSource(source) #################### # service handling @@ -214,13 +236,19 @@ def main(): port=dict(required=False,default=None), rich_rule=dict(required=False,default=None), zone=dict(required=False,default=None), - permanent=dict(type='bool',required=True), 
immediate=dict(type='bool',default=False), + source=dict(required=False,default=None), + permanent=dict(type='bool',required=False,default=None), state=dict(choices=['enabled', 'disabled'], required=True), timeout=dict(type='int',required=False,default=0), ), supports_check_mode=True ) + if module.params['source'] == None and module.params['permanent'] == None: + module.fail_json(msg='permanent is a required parameter') + + if not HAS_FIREWALLD: + module.fail_json(msg='firewalld required for this module') ## Pre-run version checking if FW_VERSION < "0.2.11": @@ -231,6 +259,7 @@ def main(): msgs = [] service = module.params['service'] rich_rule = module.params['rich_rule'] + source = module.params['source'] if module.params['port'] != None: port, protocol = module.params['port'].split('/') @@ -310,6 +339,24 @@ def main(): if changed == True: msgs.append("Changed service %s to %s" % (service, desired_state)) + if source != None: + is_enabled = get_source(zone, source) + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + add_source(zone, source) + changed=True + msgs.append("Added %s to zone %s" % (source, zone)) + elif desired_state == "disabled": + if is_enabled == True: + if module.check_mode: + module.exit_json(changed=True) + + remove_source(zone, source) + changed=True + msgs.append("Removed %s from zone %s" % (source, zone)) if port != None: if permanent: is_enabled = get_port_enabled_permanent(zone, [port, protocol]) @@ -399,6 +446,4 @@ def main(): ################################################# # import module snippets from ansible.module_utils.basic import * - main() - diff --git a/system/getent.py b/system/getent.py index bb6d162398c..7df9e1d795f 100644 --- a/system/getent.py +++ b/system/getent.py @@ -54,7 +54,7 @@ options: notes: - "Not all databases support enumeration, check system documentation for details" requirements: [ ] -author: Brian Coca +author: "Brian Coca (@bcoca)" ''' EXAMPLES = 
''' diff --git a/system/gluster_volume.py b/system/gluster_volume.py index d51512a1436..ff1ce9831db 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -38,29 +38,36 @@ options: use started/stopped to control it's availability. cluster: required: false + default: null description: - List of hosts to use for probing and brick setup host: required: false + default: null description: - Override local hostname (for peer probing purposes) replicas: required: false + default: null description: - Replica count for volume stripes: required: false + default: null description: - Stripe count for volume transport: required: false choices: [ 'tcp', 'rdma', 'tcp,rdma' ] + default: 'tcp' description: - Transport type for volume - brick: + bricks: required: false + default: null description: - - Brick path on servers + - Brick paths on servers. Multiple brick paths can be separated by commas + aliases: ['brick'] start_on_create: choices: [ 'yes', 'no'] required: false @@ -69,41 +76,46 @@ options: rebalance: choices: [ 'yes', 'no'] required: false + default: 'no' description: - Controls whether the cluster is rebalanced after changes directory: required: false + default: null description: - Directory for limit-usage options: required: false + default: null description: - A dictionary/hash with options/settings for the volume quota: required: false + default: null description: - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list) force: required: false + default: null description: - If brick is being created in the root partition, module will fail. 
Set force to true to override this behaviour notes: - "Requires cli tools for GlusterFS on servers" - "Will add new bricks, but not remove them" -author: Taneli Leppä +author: "Taneli Leppä (@rosmo)" """ EXAMPLES = """ - name: create gluster volume - gluster_volume: state=present name=test1 brick=/bricks/brick1/g1 rebalance=yes cluster:"{{ play_hosts }}" + gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster="192.168.1.10,192.168.1.11" run_once: true - name: tune gluster_volume: state=present name=test1 options='{performance.cache-size: 256MB}' - name: start gluster volume - gluster_volume: status=started name=test1 + gluster_volume: state=started name=test1 - name: limit usage gluster_volume: state=present name=test1 directory=/foo quota=20.0MB @@ -113,173 +125,190 @@ EXAMPLES = """ - name: remove gluster volume gluster_volume: state=absent name=test1 + +- name: create gluster volume with multiple bricks + gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster="192.168.1.10,192.168.1.11" + run_once: true """ import shutil import time import socket -def main(): - +glusterbin = '' - def run_gluster(gargs, **kwargs): - args = [glusterbin] - args.extend(gargs) +def run_gluster(gargs, **kwargs): + global glusterbin + global module + args = [glusterbin] + args.extend(gargs) + try: rc, out, err = module.run_command(args, **kwargs) if rc != 0: - module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) - return out - - def run_gluster_nofail(gargs, **kwargs): - args = [glusterbin] - args.extend(gargs) - rc, out, err = module.run_command(args, **kwargs) - if rc != 0: - return None - return out - - def run_gluster_yes(gargs): - args = [glusterbin] - args.extend(gargs) - rc, out, err = module.run_command(args, data='y\n') - if rc != 0: - module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out 
!= '' else err)) - return out - - def get_peers(): - out = run_gluster([ 'peer', 'status']) - i = 0 - peers = {} - hostname = None - uuid = None - state = None - for row in out.split('\n'): - if ': ' in row: - key, value = row.split(': ') - if key.lower() == 'hostname': - hostname = value - if key.lower() == 'uuid': - uuid = value - if key.lower() == 'state': - state = value - peers[hostname] = [ uuid, state ] - return peers - - def get_volumes(): - out = run_gluster([ 'volume', 'info' ]) - - volumes = {} - volume = {} - for row in out.split('\n'): - if ': ' in row: - key, value = row.split(': ') - if key.lower() == 'volume name': - volume['name'] = value + module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err)) + except Exception, e: + module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e))) + return out + +def run_gluster_nofail(gargs, **kwargs): + global glusterbin + global module + args = [glusterbin] + args.extend(gargs) + rc, out, err = module.run_command(args, **kwargs) + if rc != 0: + return None + return out + +def run_gluster_yes(gargs): + global glusterbin + global module + args = [glusterbin] + args.extend(gargs) + rc, out, err = module.run_command(args, data='y\n') + if rc != 0: + module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err)) + return out + +def get_peers(): + out = run_gluster([ 'peer', 'status']) + i = 0 + peers = {} + hostname = None + uuid = None + state = None + for row in out.split('\n'): + if ': ' in row: + key, value = row.split(': ') + if key.lower() == 'hostname': + hostname = value + if key.lower() == 'uuid': + uuid = value + if key.lower() == 'state': + state = value + peers[hostname] = [ uuid, state ] + return peers + +def get_volumes(): + out = run_gluster([ 'volume', 'info' ]) + + volumes = {} + volume = {} + for row in out.split('\n'): + if ': ' in row: + key, value = row.split(': ') + if 
key.lower() == 'volume name': + volume['name'] = value + volume['options'] = {} + volume['quota'] = False + if key.lower() == 'volume id': + volume['id'] = value + if key.lower() == 'status': + volume['status'] = value + if key.lower() == 'transport-type': + volume['transport'] = value + if key.lower() != 'bricks' and key.lower()[:5] == 'brick': + if not 'bricks' in volume: + volume['bricks'] = [] + volume['bricks'].append(value) + # Volume options + if '.' in key: + if not 'options' in volume: volume['options'] = {} - volume['quota'] = False - if key.lower() == 'volume id': - volume['id'] = value - if key.lower() == 'status': - volume['status'] = value - if key.lower() == 'transport-type': - volume['transport'] = value - if key.lower() != 'bricks' and key.lower()[:5] == 'brick': - if not 'bricks' in volume: - volume['bricks'] = [] - volume['bricks'].append(value) - # Volume options - if '.' in key: - if not 'options' in volume: - volume['options'] = {} - volume['options'][key] = value - if key == 'features.quota' and value == 'on': - volume['quota'] = True - else: - if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:': - if len(volume) > 0: - volumes[volume['name']] = volume - volume = {} - return volumes - - def get_quotas(name, nofail): - quotas = {} - if nofail: - out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ]) - if not out: - return quotas + volume['options'][key] = value + if key == 'features.quota' and value == 'on': + volume['quota'] = True else: - out = run_gluster([ 'volume', 'quota', name, 'list' ]) - for row in out.split('\n'): - if row[:1] == '/': - q = re.split('\s+', row) - quotas[q[0]] = q[1] - return quotas - - def wait_for_peer(host): - for x in range(0, 4): - peers = get_peers() - if host in peers and peers[host][1].lower().find('peer in cluster') != -1: - return True - time.sleep(1) - return False - - def probe(host): - run_gluster([ 'peer', 'probe', host ]) - if not wait_for_peer(host): - 
module.fail_json(msg='failed to probe peer %s' % host) - changed = True - - def probe_all_peers(hosts, peers, myhostname): - for host in hosts: - if host not in peers: - # dont probe ourselves - if myhostname != host: - probe(host) - - def create_volume(name, stripe, replica, transport, hosts, brick, force): - args = [ 'volume', 'create' ] - args.append(name) - if stripe: - args.append('stripe') - args.append(str(stripe)) - if replica: - args.append('replica') - args.append(str(replica)) - args.append('transport') - args.append(transport) + if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:': + if len(volume) > 0: + volumes[volume['name']] = volume + volume = {} + return volumes + +def get_quotas(name, nofail): + quotas = {} + if nofail: + out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ]) + if not out: + return quotas + else: + out = run_gluster([ 'volume', 'quota', name, 'list' ]) + for row in out.split('\n'): + if row[:1] == '/': + q = re.split('\s+', row) + quotas[q[0]] = q[1] + return quotas + +def wait_for_peer(host): + for x in range(0, 4): + peers = get_peers() + if host in peers and peers[host][1].lower().find('peer in cluster') != -1: + return True + time.sleep(1) + return False + +def probe(host, myhostname): + global module + run_gluster([ 'peer', 'probe', host ]) + if not wait_for_peer(host): + module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname)) + changed = True + +def probe_all_peers(hosts, peers, myhostname): + for host in hosts: + host = host.strip() # Clean up any extra space for exact comparison + if host not in peers: + # dont probe ourselves + if myhostname != host: + probe(host, myhostname) + +def create_volume(name, stripe, replica, transport, hosts, bricks, force): + args = [ 'volume', 'create' ] + args.append(name) + if stripe: + args.append('stripe') + args.append(str(stripe)) + if replica: + args.append('replica') + args.append(str(replica)) + args.append('transport') + 
args.append(transport) + for brick in bricks: for host in hosts: args.append(('%s:%s' % (host, brick))) - if force: - args.append('force') - run_gluster(args) + if force: + args.append('force') + run_gluster(args) - def start_volume(name): - run_gluster([ 'volume', 'start', name ]) +def start_volume(name): + run_gluster([ 'volume', 'start', name ]) - def stop_volume(name): - run_gluster_yes([ 'volume', 'stop', name ]) +def stop_volume(name): + run_gluster_yes([ 'volume', 'stop', name ]) - def set_volume_option(name, option, parameter): - run_gluster([ 'volume', 'set', name, option, parameter ]) +def set_volume_option(name, option, parameter): + run_gluster([ 'volume', 'set', name, option, parameter ]) - def add_brick(name, brick, force): - args = [ 'volume', 'add-brick', name, brick ] - if force: - args.append('force') - run_gluster(args) +def add_brick(name, brick, force): + args = [ 'volume', 'add-brick', name, brick ] + if force: + args.append('force') + run_gluster(args) - def rebalance(name): - run_gluster(['volume', 'rebalance', name, 'start']) +def do_rebalance(name): + run_gluster([ 'volume', 'rebalance', name, 'start' ]) - def enable_quota(name): - run_gluster([ 'volume', 'quota', name, 'enable' ]) +def enable_quota(name): + run_gluster([ 'volume', 'quota', name, 'enable' ]) - def set_quota(name, directory, value): - run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ]) +def set_quota(name, directory, value): + run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ]) +def main(): ### MAIN ### + global module module = AnsibleModule( argument_spec=dict( name=dict(required=True, default=None, aliases=['volume']), @@ -289,7 +318,7 @@ def main(): stripes=dict(required=False, default=None, type='int'), replicas=dict(required=False, default=None, type='int'), transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]), - brick=dict(required=False, default=None), + bricks=dict(required=False, 
default=None, aliases=['brick']), start_on_create=dict(required=False, default=True, type='bool'), rebalance=dict(required=False, default=False, type='bool'), options=dict(required=False, default={}, type='dict'), @@ -299,6 +328,7 @@ def main(): ) ) + global glusterbin glusterbin = module.get_bin_path('gluster', True) changed = False @@ -306,7 +336,7 @@ def main(): action = module.params['state'] volume_name = module.params['name'] cluster= module.params['cluster'] - brick_path = module.params['brick'] + brick_paths = module.params['bricks'] stripes = module.params['stripes'] replicas = module.params['replicas'] transport = module.params['transport'] @@ -318,6 +348,16 @@ def main(): if not myhostname: myhostname = socket.gethostname() + # Clean up if last element is empty. Consider that yml can look like this: + # cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}" + if cluster != None and cluster[-1] == '': + cluster = cluster[0:-1] + + if brick_paths != None and "," in brick_paths: + brick_paths = brick_paths.split(",") + else: + brick_paths = [brick_paths] + options = module.params['options'] quota = module.params['quota'] directory = module.params['directory'] @@ -333,7 +373,9 @@ def main(): # do the work! 
if action == 'absent': if volume_name in volumes: - run_gluster([ 'volume', 'delete', name ]) + if volumes[volume_name]['status'].lower() != 'stopped': + stop_volume(volume_name) + run_gluster_yes([ 'volume', 'delete', volume_name ]) changed = True if action == 'present': @@ -341,7 +383,7 @@ def main(): # create if it doesn't exist if volume_name not in volumes: - create_volume(volume_name, stripes, replicas, transport, cluster, brick_path, force) + create_volume(volume_name, stripes, replicas, transport, cluster, brick_paths, force) volumes = get_volumes() changed = True @@ -355,10 +397,11 @@ def main(): removed_bricks = [] all_bricks = [] for node in cluster: - brick = '%s:%s' % (node, brick_path) - all_bricks.append(brick) - if brick not in volumes[volume_name]['bricks']: - new_bricks.append(brick) + for brick_path in brick_paths: + brick = '%s:%s' % (node, brick_path) + all_bricks.append(brick) + if brick not in volumes[volume_name]['bricks']: + new_bricks.append(brick) # this module does not yet remove bricks, but we check those anyways for brick in volumes[volume_name]['bricks']: @@ -403,7 +446,7 @@ def main(): if changed: volumes = get_volumes() if rebalance: - rebalance(volume_name) + do_rebalance(volume_name) facts = {} facts['glusterfs'] = { 'peers': peers, 'volumes': volumes, 'quotas': quotas } diff --git a/system/kernel_blacklist.py b/system/kernel_blacklist.py index 6af08c0788c..296a082a2ea 100644 --- a/system/kernel_blacklist.py +++ b/system/kernel_blacklist.py @@ -25,7 +25,7 @@ import re DOCUMENTATION = ''' --- module: kernel_blacklist -author: Matthias Vogelgesang +author: "Matthias Vogelgesang (@matze)" version_added: 1.4 short_description: Blacklist kernel modules description: diff --git a/system/known_hosts.py b/system/known_hosts.py index d4a6e9c35e0..303d9410d1e 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -26,7 +26,7 @@ description: - The M(known_hosts) module lets you add or remove a host from the C(known_hosts) file. 
This is useful if you're going to want to use the M(git) module over ssh, for example. If you have a very large number of host keys to manage, you will find the M(template) module more useful. -version_added: "1.6" +version_added: "1.9" options: name: aliases: [ 'host' ] @@ -51,7 +51,7 @@ options: required: no default: present requirements: [ ] -author: Matthew Vernon +author: "Matthew Vernon (@mcv21)" ''' EXAMPLES = ''' @@ -82,7 +82,7 @@ def enforce_state(module, params): Add or remove key. """ - host = params["host"] + host = params["name"] key = params.get("key",None) port = params.get("port",None) #expand the path parameter; otherwise module.add_path_info @@ -128,20 +128,25 @@ def enforce_state(module, params): module.fail_json(msg="Failed to read %s: %s" % \ (path,str(e))) try: - outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path), - delete=False) + outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path)) if inf is not None: for line in inf: outf.write(line) inf.close() outf.write(key) - outf.close() + outf.flush() module.atomic_move(outf.name,path) - except IOError,e: + except (IOError,OSError),e: module.fail_json(msg="Failed to write to file %s: %s" % \ (path,str(e))) + + try: + outf.close() + except: + pass + params['changed'] = True - + return params def sanity_check(module,host,key,sshkeygen): @@ -162,16 +167,20 @@ def sanity_check(module,host,key,sshkeygen): #The approach is to write the key to a temporary file, #and then attempt to look up the specified host in that file. 
try: - outf=tempfile.NamedTemporaryFile(delete=False) + outf=tempfile.NamedTemporaryFile() outf.write(key) - outf.close() + outf.flush() except IOError,e: module.fail_json(msg="Failed to write to temporary file %s: %s" % \ (outf.name,str(e))) rc,stdout,stderr=module.run_command([sshkeygen,'-F',host, '-f',outf.name], check_rc=True) - os.remove(outf.name) + try: + outf.close() + except: + pass + if stdout=='': #host not found module.fail_json(msg="Host parameter does not match hashed host field in supplied key") @@ -188,10 +197,14 @@ def search_for_host_key(module,host,key,path,sshkeygen): replace=False if os.path.exists(path)==False: return False, False + #openssh >=6.4 has changed ssh-keygen behaviour such that it returns + #1 if no host is found, whereas previously it returned 0 rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,'-f',path], - check_rc=True) - if stdout=='': #host not found - return False, False + check_rc=False) + if stdout=='' and stderr=='' and (rc==0 or rc==1): + return False, False #host not found, no other errors + if rc!=0: #something went wrong + module.fail_json(msg="ssh-keygen failed (rc=%d,stdout='%s',stderr='%s')" % (rc,stdout,stderr)) #If user supplied no key, we don't want to try and replace anything with it if key is None: diff --git a/system/locale_gen.py b/system/locale_gen.py index 5d53951cf18..9108cfb53cd 100644 --- a/system/locale_gen.py +++ b/system/locale_gen.py @@ -13,6 +13,7 @@ short_description: Creates or removes locales. description: - Manages locales by editing /etc/locale.gen and invoking locale-gen. 
version_added: "1.6" +author: "Augustus Kling (@AugustusKling)" options: name: description: @@ -55,11 +56,12 @@ def is_available(name, ubuntuMode): __locales_available = '/etc/locale.gen' re_compiled = re.compile(__regexp) - with open(__locales_available, 'r') as fd: - for line in fd: - result = re_compiled.match(line) - if result and result.group('locale') == name: - return True + fd = open(__locales_available, 'r') + for line in fd: + result = re_compiled.match(line) + if result and result.group('locale') == name: + return True + fd.close() return False def is_present(name): @@ -76,10 +78,16 @@ def fix_case(name): def replace_line(existing_line, new_line): """Replaces lines in /etc/locale.gen""" - with open("/etc/locale.gen", "r") as f: + try: + f = open("/etc/locale.gen", "r") lines = [line.replace(existing_line, new_line) for line in f] - with open("/etc/locale.gen", "w") as f: + finally: + f.close() + try: + f = open("/etc/locale.gen", "w") f.write("".join(lines)) + finally: + f.close() def set_locale(name, enabled=True): """ Sets the state of the locale. Defaults to enabled. """ @@ -88,10 +96,16 @@ def set_locale(name, enabled=True): new_string = '%s \g' % (name) else: new_string = '# %s \g' % (name) - with open("/etc/locale.gen", "r") as f: + try: + f = open("/etc/locale.gen", "r") lines = [re.sub(search_string, new_string, line) for line in f] - with open("/etc/locale.gen", "w") as f: + finally: + f.close() + try: + f = open("/etc/locale.gen", "w") f.write("".join(lines)) + finally: + f.close() def apply_change(targetState, name): """Create or remove locale. @@ -124,13 +138,19 @@ def apply_change_ubuntu(targetState, name): localeGenExitValue = call(["locale-gen", name]) else: # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales. 
- with open("/var/lib/locales/supported.d/local", "r") as f: + try: + f = open("/var/lib/locales/supported.d/local", "r") content = f.readlines() - with open("/var/lib/locales/supported.d/local", "w") as f: + finally: + f.close() + try: + f = open("/var/lib/locales/supported.d/local", "w") for line in content: locale, charset = line.split(' ') if locale != name: f.write(line) + finally: + f.close() # Purge locales and regenerate. # Please provide a patch if you know how to avoid regenerating the locales to keep! localeGenExitValue = call(["locale-gen", "--purge"]) @@ -159,7 +179,7 @@ def main(): # Ubuntu created its own system to manage locales. ubuntuMode = True else: - module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package “locales” installed?") + module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?") else: # We found the common way to manage locales. ubuntuMode = False @@ -168,7 +188,10 @@ def main(): module.fail_json(msg="The locales you've entered is not available " "on your system.") - prev_state = "present" if is_present(name) else "absent" + if is_present(name): + prev_state = "present" + else: + prev_state = "absent" changed = (prev_state!=state) if module.check_mode: @@ -180,7 +203,7 @@ def main(): apply_change(state, name) else: apply_change_ubuntu(state, name) - except EnvironmentError as e: + except EnvironmentError, e: module.fail_json(msg=e.strerror, exitValue=e.errno) module.exit_json(name=name, changed=changed, msg="OK") diff --git a/system/lvg.py b/system/lvg.py index e568e9df677..9e3ba2d2931 100644 --- a/system/lvg.py +++ b/system/lvg.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- -author: Alexander Bulimov +author: "Alexander Bulimov (@abulimov)" module: lvg short_description: Configure LVM volume groups description: @@ -135,7 +135,9 @@ def main(): elif state == 'present': module.fail_json(msg="No physical volumes given.") - 
+ # LVM always uses real paths not symlinks so replace symlinks with actual path + for idx, dev in enumerate(dev_list): + dev_list[idx] = os.path.realpath(dev) if state=='present': ### check given devices @@ -209,7 +211,7 @@ def main(): module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes"%(vg)) ### resize VG - current_devs = [ pv['name'] for pv in pvs if pv['vg_name'] == vg ] + current_devs = [ os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg ] devs_to_remove = list(set(current_devs) - set(dev_list)) devs_to_add = list(set(dev_list) - set(current_devs)) diff --git a/system/lvol.py b/system/lvol.py index d9be9e7dc70..7a01d83829c 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -20,7 +20,9 @@ DOCUMENTATION = ''' --- -author: Jeroen Hoekx +author: + - "Jeroen Hoekx (@jhoekx)" + - "Alexander Bulimov (@abulimov)" module: lvol short_description: Configure LVM logical volumes description: @@ -55,6 +57,10 @@ options: - Shrink or remove operations of volumes requires this switch. Ensures that that filesystems get never corrupted/destroyed by mistake. required: false + opts: + version_added: "2.0" + description: + - Free-form options to be passed to the lvcreate command notes: - Filesystems on top of the volume are not resized. ''' @@ -69,6 +75,9 @@ EXAMPLES = ''' # Create a logical volume the size of all remaining space in the volume group - lvol: vg=firefly lv=test size=100%FREE +# Create a logical volume with special options +- lvol: vg=firefly lv=test size=512g opts="-r 16" + # Extend the logical volume to 1024m. 
- lvol: vg=firefly lv=test size=1024 @@ -83,6 +92,8 @@ import re decimal_point = re.compile(r"(\.|,)") +def mkversion(major, minor, patch): + return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch) def parse_lvs(data): lvs = [] @@ -95,26 +106,52 @@ def parse_lvs(data): return lvs +def get_lvm_version(module): + ver_cmd = module.get_bin_path("lvm", required=True) + rc, out, err = module.run_command("%s version" % (ver_cmd)) + if rc != 0: + return None + m = re.search("LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out) + if not m: + return None + return mkversion(m.group(1), m.group(2), m.group(3)) + + def main(): module = AnsibleModule( argument_spec=dict( vg=dict(required=True), lv=dict(required=True), size=dict(), + opts=dict(type='str'), state=dict(choices=["absent", "present"], default='present'), force=dict(type='bool', default='no'), ), supports_check_mode=True, ) + # Determine if the "--yes" option should be used + version_found = get_lvm_version(module) + if version_found == None: + module.fail_json(msg="Failed to get LVM version number") + version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option + if version_found >= version_yesopt: + yesopt = "--yes" + else: + yesopt = "" + vg = module.params['vg'] lv = module.params['lv'] size = module.params['size'] + opts = module.params['opts'] state = module.params['state'] force = module.boolean(module.params['force']) size_opt = 'L' size_unit = 'm' + if opts is None: + opts = "" + if size: # LVCREATE(8) -l --extents option with percentage if '%' in size: @@ -187,7 +224,8 @@ def main(): changed = True else: lvcreate_cmd = module.get_bin_path("lvcreate", required=True) - rc, _, err = module.run_command("%s -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) + cmd = "%s %s -n %s -%s %s%s %s %s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg) + rc, _, err = module.run_command(cmd) if rc == 0: changed = True else: @@ -216,8 +254,8 @@ def 
main(): elif size < this_lv['size']: if not force: module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name'])) - tool = module.get_bin_path("lvextend", required=True) - tool.append("--force") + tool = module.get_bin_path("lvreduce", required=True) + tool = '%s %s' % (tool, '--force') if tool: if module.check_mode: @@ -236,4 +274,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/modprobe.py b/system/modprobe.py index 50c8f72fb2a..64e36c784a7 100644 --- a/system/modprobe.py +++ b/system/modprobe.py @@ -25,7 +25,10 @@ module: modprobe short_description: Add or remove kernel modules requirements: [] version_added: 1.4 -author: David Stygstra, Julien Dauphant, Matt Jeffery +author: + - "David Stygstra (@stygstra)" + - "Julien Dauphant" + - "Matt Jeffery" description: - Add or remove kernel modules. options: @@ -97,13 +100,13 @@ def main(): # Add/remove module as needed if args['state'] == 'present': if not present: - rc, _, err = module.run_command(['modprobe', args['name'], args['params']]) + rc, _, err = module.run_command([module.get_bin_path('modprobe', True), args['name'], args['params']]) if rc != 0: module.fail_json(msg=err, **args) args['changed'] = True elif args['state'] == 'absent': if present: - rc, _, err = module.run_command(['rmmod', args['name']]) + rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']]) if rc != 0: module.fail_json(msg=err, **args) args['changed'] = True diff --git a/system/ohai.py b/system/ohai.py index b50abc9db03..6f066ec5ad8 100644 --- a/system/ohai.py +++ b/system/ohai.py @@ -32,7 +32,9 @@ version_added: "0.6" options: {} notes: [] requirements: [ "ohai" ] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan (@mpdehaan)" ''' EXAMPLES = ''' diff --git a/system/open_iscsi.py b/system/open_iscsi.py index c661a723d77..e2477538888 100644 --- 
a/system/open_iscsi.py +++ b/system/open_iscsi.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: open_iscsi -author: Serge van Ginderachter +author: "Serge van Ginderachter (@srvg)" version_added: "1.4" short_description: Manage iscsi targets with open-iscsi description: diff --git a/system/osx_defaults.py b/system/osx_defaults.py new file mode 100644 index 00000000000..e4dc5f8c750 --- /dev/null +++ b/system/osx_defaults.py @@ -0,0 +1,352 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, GeekChimp - Franck Nijhof +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: osx_defaults +author: Franck Nijhof (@frenck) +short_description: osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible +description: + - osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible scripts. + Mac OS X applications and other programs use the defaults system to record user preferences and other + information that must be maintained when the applications aren't running (such as default font for new + documents, or the position of an Info panel). +version_added: "2.0" +options: + domain: + description: + - The domain is a domain name of the form com.companyname.appname. + required: false + default: NSGlobalDomain + key: + description: + - The key of the user preference + required: true + type: + description: + - The type of value to write. 
+ required: false + default: string + choices: [ "array", "bool", "boolean", "date", "float", "int", "integer", "string" ] + array_add: + description: + - Add new elements to the array for a key which has an array as its value. + required: false + default: false + choices: [ "true", "false" ] + value: + description: + - The value to write. Only required when state = present. + required: false + default: null + state: + description: + - The state of the user defaults + required: false + default: present + choices: [ "present", "absent" ] +notes: + - Apple Mac caches defaults. You may need to logout and login to apply the changes. +''' + +EXAMPLES = ''' +- osx_defaults: domain=com.apple.Safari key=IncludeInternalDebugMenu type=bool value=true state=present +- osx_defaults: domain=NSGlobalDomain key=AppleMeasurementUnits type=string value=Centimeters state=present +- osx_defaults: key=AppleMeasurementUnits type=string value=Centimeters +- osx_defaults: + key: AppleLanguages + type: array + value: ["en", "nl"] +- osx_defaults: domain=com.geekchimp.macable key=ExampleKeyToRemove state=absent +''' + +from datetime import datetime + +# exceptions --------------------------------------------------------------- {{{ +class OSXDefaultsException(Exception): + pass +# /exceptions -------------------------------------------------------------- }}} + +# class MacDefaults -------------------------------------------------------- {{{ +class OSXDefaults(object): + + """ Class to manage Mac OS user defaults """ + + # init ---------------------------------------------------------------- {{{ + """ Initialize this module. 
Finds 'defaults' executable and preps the parameters """ + def __init__(self, **kwargs): + + # Initial var for storing current defaults value + self.current_value = None + + # Just set all given parameters + for key, val in kwargs.iteritems(): + setattr(self, key, val) + + # Try to find the defaults executable + self.executable = self.module.get_bin_path( + 'defaults', + required=False, + opt_dirs=self.path.split(':'), + ) + + if not self.executable: + raise OSXDefaultsException("Unable to locate defaults executable.") + + # When state is present, we require a parameter + if self.state == "present" and self.value is None: + raise OSXDefaultsException("Missing value parameter") + + # Ensure the value is the correct type + self.value = self._convert_type(self.type, self.value) + + # /init --------------------------------------------------------------- }}} + + # tools --------------------------------------------------------------- {{{ + """ Converts value to given type """ + def _convert_type(self, type, value): + + if type == "string": + return str(value) + elif type in ["bool", "boolean"]: + if value.lower() in [True, 1, "true", "1", "yes"]: + return True + elif value.lower() in [False, 0, "false", "0", "no"]: + return False + raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value))) + elif type == "date": + try: + return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S") + except ValueError: + raise OSXDefaultsException( + "Invalid date value: {0}. 
Required format yyy-mm-dd hh:mm:ss.".format(repr(value)) + ) + elif type in ["int", "integer"]: + if not str(value).isdigit(): + raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value))) + return int(value) + elif type == "float": + try: + value = float(value) + except ValueError: + raise OSXDefaultsException("Invalid float value: {0}".format(repr(value))) + return value + elif type == "array": + if not isinstance(value, list): + raise OSXDefaultsException("Invalid value. Expected value to be an array") + return value + + raise OSXDefaultsException('Type is not supported: {0}'.format(type)) + + """ Converts array output from defaults to an list """ + @staticmethod + def _convert_defaults_str_to_list(value): + + # Split output of defaults. Every line contains a value + value = value.splitlines() + + # Remove first and last item, those are not actual values + value.pop(0) + value.pop(-1) + + # Remove extra spaces and comma (,) at the end of values + value = [re.sub(',$', '', x.strip(' ')) for x in value] + + return value + # /tools -------------------------------------------------------------- }}} + + # commands ------------------------------------------------------------ {{{ + """ Reads value of this domain & key from defaults """ + def read(self): + # First try to find out the type + rc, out, err = self.module.run_command([self.executable, "read-type", self.domain, self.key]) + + # If RC is 1, the key does not exists + if rc == 1: + return None + + # If the RC is not 0, then terrible happened! Ooooh nooo! + if rc != 0: + raise OSXDefaultsException("An error occurred while reading key type from defaults: " + out) + + # Ok, lets parse the type from output + type = out.strip().replace('Type is ', '') + + # Now get the current value + rc, out, err = self.module.run_command([self.executable, "read", self.domain, self.key]) + + # Strip output + out = out.strip() + + # An non zero RC at this point is kinda strange... 
+ if rc != 0: + raise OSXDefaultsException("An error occurred while reading key value from defaults: " + out) + + # Convert string to list when type is array + if type == "array": + out = self._convert_defaults_str_to_list(out) + + # Store the current_value + self.current_value = self._convert_type(type, out) + + """ Writes value to this domain & key to defaults """ + def write(self): + + # We need to convert some values so the defaults commandline understands it + if type(self.value) is bool: + if self.value: + value = "TRUE" + else: + value = "FALSE" + elif type(self.value) is int or type(self.value) is float: + value = str(self.value) + elif self.array_add and self.current_value is not None: + value = list(set(self.value) - set(self.current_value)) + elif isinstance(self.value, datetime): + value = self.value.strftime('%Y-%m-%d %H:%M:%S') + else: + value = self.value + + # When the type is array and array_add is enabled, morph the type :) + if self.type == "array" and self.array_add: + self.type = "array-add" + + # All values should be a list, for easy passing it to the command + if not isinstance(value, list): + value = [value] + + rc, out, err = self.module.run_command([self.executable, 'write', self.domain, self.key, '-' + self.type] + value) + + if rc != 0: + raise OSXDefaultsException('An error occurred while writing value to defaults: ' + out) + + """ Deletes defaults key from domain """ + def delete(self): + rc, out, err = self.module.run_command([self.executable, 'delete', self.domain, self.key]) + if rc != 0: + raise OSXDefaultsException("An error occurred while deleting key from defaults: " + out) + + # /commands ----------------------------------------------------------- }}} + + # run ----------------------------------------------------------------- {{{ + """ Does the magic! :) """ + def run(self): + + # Get the current value from defaults + self.read() + + # Handle absent state + if self.state == "absent": + print "Absent state detected!" 
+ if self.current_value is None: + return False + self.delete() + return True + + # There is a type mismatch! Given type does not match the type in defaults + if self.current_value is not None and type(self.current_value) is not type(self.value): + raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__) + + # Current value matches the given value. Nothing need to be done. Arrays need extra care + if self.type == "array" and self.current_value is not None and not self.array_add and \ + set(self.current_value) == set(self.value): + return False + elif self.type == "array" and self.current_value is not None and self.array_add and \ + len(list(set(self.value) - set(self.current_value))) == 0: + return False + elif self.current_value == self.value: + return False + + # Change/Create/Set given key/value for domain in defaults + self.write() + return True + + # /run ---------------------------------------------------------------- }}} + +# /class MacDefaults ------------------------------------------------------ }}} + + +# main -------------------------------------------------------------------- {{{ +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict( + default="NSGlobalDomain", + required=False, + ), + key=dict( + default=None, + ), + type=dict( + default="string", + required=False, + choices=[ + "array", + "bool", + "boolean", + "date", + "float", + "int", + "integer", + "string", + ], + ), + array_add=dict( + default=False, + required=False, + choices=BOOLEANS, + ), + value=dict( + default=None, + required=False, + ), + state=dict( + default="present", + required=False, + choices=[ + "absent", "present" + ], + ), + path=dict( + default="/usr/bin:/usr/local/bin", + required=False, + ) + ), + supports_check_mode=True, + ) + + domain = module.params['domain'] + key = module.params['key'] + type = module.params['type'] + array_add = module.params['array_add'] + value = module.params['value'] + state = 
module.params['state'] + path = module.params['path'] + + try: + defaults = OSXDefaults(module=module, domain=domain, key=key, type=type, + array_add=array_add, value=value, state=state, path=path) + changed = defaults.run() + module.exit_json(changed=changed) + except OSXDefaultsException, e: + module.fail_json(msg=e.message) + +# /main ------------------------------------------------------------------- }}} + +from ansible.module_utils.basic import * +main() diff --git a/system/puppet.py b/system/puppet.py new file mode 100644 index 00000000000..336b2c81108 --- /dev/null +++ b/system/puppet.py @@ -0,0 +1,214 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +import json +import os +import pipes +import stat + +DOCUMENTATION = ''' +--- +module: puppet +short_description: Runs puppet +description: + - Runs I(puppet) agent or apply in a reliable manner +version_added: "2.0" +options: + timeout: + description: + - How long to wait for I(puppet) to finish. + required: false + default: 30m + puppetmaster: + description: + - The hostname of the puppetmaster to contact. + required: false + default: None + manifest: + desciption: + - Path to the manifest file to run puppet apply on. + required: false + default: None + show_diff: + description: + - Should puppet return diffs of changes applied. 
Defaults to off to avoid leaking secret changes by default. + required: false + default: no + choices: [ "yes", "no" ] + facts: + description: + - A dict of values to pass in as persistent external facter facts + required: false + default: None + facter_basename: + desciption: + - Basename of the facter output file + required: false + default: ansible + environment: + desciption: + - Puppet environment to be used. + required: false + default: None +requirements: [ puppet ] +author: "Monty Taylor (@emonty)" +''' + +EXAMPLES = ''' +# Run puppet agent and fail if anything goes wrong +- puppet + +# Run puppet and timeout in 5 minutes +- puppet: timeout=5m + +# Run puppet using a different environment +- puppet: environment=testing +''' + + +def _get_facter_dir(): + if os.getuid() == 0: + return '/etc/facter/facts.d' + else: + return os.path.expanduser('~/.facter/facts.d') + + +def _write_structured_data(basedir, basename, data): + if not os.path.exists(basedir): + os.makedirs(basedir) + file_path = os.path.join(basedir, "{0}.json".format(basename)) + # This is more complex than you might normally expect because we want to + # open the file with only u+rw set. 
Also, we use the stat constants + # because ansible still supports python 2.4 and the octal syntax changed + out_file = os.fdopen( + os.open( + file_path, os.O_CREAT | os.O_WRONLY, + stat.S_IRUSR | stat.S_IWUSR), 'w') + out_file.write(json.dumps(data).encode('utf8')) + out_file.close() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + timeout=dict(default="30m"), + puppetmaster=dict(required=False, default=None), + manifest=dict(required=False, default=None), + show_diff=dict( + default=False, aliases=['show-diff'], type='bool'), + facts=dict(default=None), + facter_basename=dict(default='ansible'), + environment=dict(required=False, default=None), + ), + supports_check_mode=True, + mutually_exclusive=[ + ('puppetmaster', 'manifest'), + ], + ) + p = module.params + + global PUPPET_CMD + PUPPET_CMD = module.get_bin_path("puppet", False) + + if not PUPPET_CMD: + module.fail_json( + msg="Could not find puppet. Please ensure it is installed.") + + if p['manifest']: + if not os.path.exists(p['manifest']): + module.fail_json( + msg="Manifest file %(manifest)s not found." 
% dict( + manifest=p['manifest'])) + + # Check if puppet is disabled here + if not p['manifest']: + rc, stdout, stderr = module.run_command( + PUPPET_CMD + " config print agent_disabled_lockfile") + if os.path.exists(stdout.strip()): + module.fail_json( + msg="Puppet agent is administratively disabled.", disabled=True) + elif rc != 0: + module.fail_json( + msg="Puppet agent state could not be determined.") + + if module.params['facts'] and not module.check_mode: + _write_structured_data( + _get_facter_dir(), + module.params['facter_basename'], + module.params['facts']) + + base_cmd = "timeout -s 9 %(timeout)s %(puppet_cmd)s" % dict( + timeout=pipes.quote(p['timeout']), puppet_cmd=PUPPET_CMD) + + if not p['manifest']: + cmd = ("%(base_cmd)s agent --onetime" + " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay" + " --detailed-exitcodes --verbose") % dict( + base_cmd=base_cmd, + ) + if p['puppetmaster']: + cmd += " -- server %s" % pipes.quote(p['puppetmaster']) + if p['show_diff']: + cmd += " --show-diff" + if p['environment']: + cmd += " --environment '%s'" % p['environment'] + if module.check_mode: + cmd += " --noop" + else: + cmd += " --no-noop" + else: + cmd = "%s apply --detailed-exitcodes " % base_cmd + if p['environment']: + cmd += "--environment '%s' " % p['environment'] + if module.check_mode: + cmd += "--noop " + else: + cmd += "--no-noop " + cmd += pipes.quote(p['manifest']) + rc, stdout, stderr = module.run_command(cmd) + + if rc == 0: + # success + module.exit_json(rc=rc, changed=False, stdout=stdout) + elif rc == 1: + # rc==1 could be because it's disabled + # rc==1 could also mean there was a compilation failure + disabled = "administratively disabled" in stdout + if disabled: + msg = "puppet is disabled" + else: + msg = "puppet did not run" + module.exit_json( + rc=rc, disabled=disabled, msg=msg, + error=True, stdout=stdout, stderr=stderr) + elif rc == 2: + # success with changes + module.exit_json(rc=0, changed=True) + elif rc == 124: + 
# timeout + module.exit_json( + rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr) + else: + # failure + module.fail_json( + rc=rc, msg="%s failed with return code: %d" % (cmd, rc), + stdout=stdout, stderr=stderr) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/system/svc.py b/system/svc.py old mode 100755 new mode 100644 index 04749cfc134..9831ce42ea7 --- a/system/svc.py +++ b/system/svc.py @@ -1,10 +1,27 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# +# (c) 2015, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see DOCUMENTATION = ''' --- module: svc -author: Brian Coca +author: "Brian Coca (@bcoca)" version_added: short_description: Manage daemontools services. description: diff --git a/system/ufw.py b/system/ufw.py index a49aa8c3a49..cd148edf2ef 100644 --- a/system/ufw.py +++ b/system/ufw.py @@ -28,7 +28,10 @@ short_description: Manage firewall with UFW description: - Manage firewall with UFW. version_added: 1.6 -author: Aleksey Ovcharenko, Jarno Keskikangas, Ahti Kitsik +author: + - "Aleksey Ovcharenko (@ovcharenko)" + - "Jarno Keskikangas (@pyykkis)" + - "Ahti Kitsik (@ahtik)" notes: - See C(man ufw) for more examples. requirements: @@ -113,6 +116,11 @@ options: - Specify interface for rule. required: false aliases: ['if'] + route: + description: + - Apply the rule to routed/forwarded packets. 
+ required: false + choices: ['yes', 'no'] ''' EXAMPLES = ''' @@ -162,6 +170,10 @@ ufw: rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 # Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host. # Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work. ufw: rule=deny proto=tcp src=2001:db8::/32 port=25 + +# Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24. +# Can be used to further restrict a global FORWARD policy set to allow +ufw: rule=deny route=yes src=1.2.3.0/24 dest=4.5.6.0/24 ''' from operator import itemgetter @@ -175,6 +187,7 @@ def main(): logging = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']), direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing', 'routed']), delete = dict(default=False, type='bool'), + route = dict(default=False, type='bool'), insert = dict(default=None), rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']), interface = dict(default=None, aliases=['if']), @@ -238,10 +251,11 @@ def main(): elif command == 'rule': # Rules are constructed according to the long format # - # ufw [--dry-run] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ + # ufw [--dry-run] [delete] [insert NUM] [route] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \ # [proto protocol] [app application] cmd.append([module.boolean(params['delete']), 'delete']) + cmd.append([module.boolean(params['route']), 'route']) cmd.append([params['insert'], "insert %s" % params['insert']]) cmd.append([value]) cmd.append([module.boolean(params['log']), 'log']) diff --git a/system/zfs.py b/system/zfs.py index 93248897051..c3c87634377 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -177,7 +177,7 @@ options: description: - The sync property. 
required: False - choices: ['on','off'] + choices: ['standard','always','disabled'] utf8only: description: - The utf8only property. @@ -206,7 +206,7 @@ options: - The zoned property. required: False choices: ['on','off'] -author: Johan Wiren +author: "Johan Wiren (@johanwiren)" ''' EXAMPLES = ''' @@ -250,7 +250,7 @@ class Zfs(object): if self.module.check_mode: self.changed = True return - properties=self.properties + properties = self.properties volsize = properties.pop('volsize', None) volblocksize = properties.pop('volblocksize', None) if "@" in self.name: @@ -260,6 +260,10 @@ class Zfs(object): cmd = [self.module.get_bin_path('zfs', True)] cmd.append(action) + + if createparent: + cmd.append('-p') + if volblocksize: cmd.append('-b %s' % volblocksize) if properties: @@ -271,7 +275,7 @@ class Zfs(object): cmd.append(self.name) (rc, err, out) = self.module.run_command(' '.join(cmd)) if rc == 0: - self.changed=True + self.changed = True else: self.module.fail_json(msg=out) @@ -345,6 +349,7 @@ def main(): 'checksum': {'required': False, 'choices':['on', 'off', 'fletcher2', 'fletcher4', 'sha256']}, 'compression': {'required': False, 'choices':['on', 'off', 'lzjb', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lz4', 'zle']}, 'copies': {'required': False, 'choices':['1', '2', '3']}, + 'createparent': {'required': False, 'choices':['on', 'off']}, 'dedup': {'required': False, 'choices':['on', 'off']}, 'devices': {'required': False, 'choices':['on', 'off']}, 'exec': {'required': False, 'choices':['on', 'off']}, @@ -368,7 +373,7 @@ def main(): 'sharenfs': {'required': False}, 'sharesmb': {'required': False}, 'snapdir': {'required': False, 'choices':['hidden', 'visible']}, - 'sync': {'required': False, 'choices':['on', 'off']}, + 'sync': {'required': False, 'choices':['standard', 'always', 'disabled']}, # Not supported #'userquota': {'required': False}, 'utf8only': {'required': False, 'choices':['on', 'off']}, @@ -396,7 
+401,7 @@ def main(): result['name'] = name result['state'] = state - zfs=Zfs(module, name, properties) + zfs = Zfs(module, name, properties) if state == 'present': if zfs.exists(): diff --git a/test-docs.sh b/test-docs.sh new file mode 100755 index 00000000000..76297fbada6 --- /dev/null +++ b/test-docs.sh @@ -0,0 +1,21 @@ +#!/bin/sh +set -x + +CHECKOUT_DIR=".ansible-checkout" +MOD_REPO="$1" + +# Hidden file to avoid the module_formatter recursing into the checkout +git clone https://github.com/ansible/ansible "$CHECKOUT_DIR" +cd "$CHECKOUT_DIR" +git submodule update --init +rm -rf "lib/ansible/modules/$MOD_REPO" +ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO" + +pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx + +. ./hacking/env-setup +PAGER=/bin/cat bin/ansible-doc -l +if [ $? -ne 0 ] ; then + exit $? +fi +make -C docsite diff --git a/web_infrastructure/ejabberd_user.py b/web_infrastructure/ejabberd_user.py old mode 100755 new mode 100644 index d8b0384679c..bf86806ad52 --- a/web_infrastructure/ejabberd_user.py +++ b/web_infrastructure/ejabberd_user.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: ejabberd_user version_added: "1.5" -author: Peter Sprygada +author: "Peter Sprygada (@privateip)" short_description: Manages users for ejabberd servers requirements: - ejabberd with mod_admin_extra @@ -113,7 +113,7 @@ class EjabberdUser(object): (rc, out, err) = self.run_command('check_account', options) except EjabberdUserException, e: (rc, out, err) = (1, None, "required attribute(s) missing") - return True if rc == 0 else False + return not bool(int(rc)) def log(self, entry): """ This method will log information to the local syslog facility """ diff --git a/web_infrastructure/jboss.py b/web_infrastructure/jboss.py index 65b44d23047..9ec67b7c7b1 100644 --- a/web_infrastructure/jboss.py +++ b/web_infrastructure/jboss.py @@ -47,7 +47,7 @@ options: notes: - "The JBoss standalone deployment-scanner has to be enabled in standalone.xml" - "Ensure 
no identically named application is deployed through the JBoss CLI" -author: Jeroen Hoekx +author: "Jeroen Hoekx (@jhoekx)" """ EXAMPLES = """ diff --git a/web_infrastructure/jira.py b/web_infrastructure/jira.py index 950fc3dbfcf..79cfb72d4a7 100644 --- a/web_infrastructure/jira.py +++ b/web_infrastructure/jira.py @@ -99,7 +99,7 @@ options: notes: - "Currently this only works with basic-auth." -author: Steve Smith +author: "Steve Smith (@tarka)" """ EXAMPLES = """ @@ -335,7 +335,7 @@ def main(): ret = method(restbase, user, passwd, module.params) - except Exception as e: + except Exception, e: return module.fail_json(msg=e.message) diff --git a/windows/win_chocolatey.ps1 b/windows/win_chocolatey.ps1 index 22e0d83e77c..4a033d23157 100644 --- a/windows/win_chocolatey.ps1 +++ b/windows/win_chocolatey.ps1 @@ -16,25 +16,11 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +$ErrorActionPreference = "Stop" + # WANT_JSON # POWERSHELL_COMMON -function Write-Log -{ - param - ( - [parameter(mandatory=$false)] - [System.String] - $message - ) - - $date = get-date -format 'yyyy-MM-dd hh:mm:ss.zz' - - Write-Host "$date | $message" - - Out-File -InputObject "$date $message" -FilePath $global:LoggingFile -Append -} - $params = Parse-Args $args; $result = New-Object PSObject; Set-Attr $result "changed" $false; @@ -48,21 +34,22 @@ Else Fail-Json $result "missing required argument: name" } -if(($params.logPath).length -gt 0) +If ($params.force) { - $global:LoggingFile = $params.logPath + $force = $params.force | ConvertTo-Bool } -else +Else { - $global:LoggingFile = "c:\ansible-playbook.log" + $force = $false } -If ($params.force) + +If ($params.upgrade) { - $force = $params.force | ConvertTo-Bool + $upgrade = $params.upgrade | ConvertTo-Bool } Else { - $force = $false + $upgrade = $false } If ($params.version) @@ -74,6 +61,15 @@ Else $version = $null } +If ($params.source) +{ + $source = $params.source.ToString().ToLower() +} 
+Else +{ + $source = $null +} + If ($params.showlog) { $showlog = $params.showlog | ConvertTo-Bool @@ -96,153 +92,230 @@ Else $state = "present" } -$ChocoAlreadyInstalled = get-command choco -ErrorAction 0 -if ($ChocoAlreadyInstalled -eq $null) +Function Chocolatey-Install-Upgrade { - #We need to install chocolatey - $install_choco_result = iex ((new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1")) - $result.changed = $true - $executable = "C:\ProgramData\chocolatey\bin\choco.exe" -} -Else -{ - $executable = "choco.exe" -} + [CmdletBinding()] -If ($params.source) -{ - $source = $params.source.ToString().ToLower() - If (($source -ne "chocolatey") -and ($source -ne "webpi") -and ($source -ne "windowsfeatures") -and ($source -ne "ruby")) + param() + + $ChocoAlreadyInstalled = get-command choco -ErrorAction 0 + if ($ChocoAlreadyInstalled -eq $null) + { + #We need to install chocolatey + iex ((new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1")) + $result.changed = $true + $script:executable = "C:\ProgramData\chocolatey\bin\choco.exe" + } + else { - Fail-Json $result "source is $source - must be one of chocolatey, ruby, webpi or windowsfeatures." 
+ $script:executable = "choco.exe" + + if ((choco --version) -lt '0.9.9') + { + Choco-Upgrade chocolatey + } } } -Elseif (!$params.source) + + +Function Choco-IsInstalled { - $source = "chocolatey" + [CmdletBinding()] + + param( + [Parameter(Mandatory=$true, Position=1)] + [string]$package + ) + + $cmd = "$executable list --local-only $package" + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "choco_error_cmd" $cmd + Set-Attr $result "choco_error_log" "$results" + + Throw "Error checking installation status for $package" + } + + If ("$results" -match " $package .* (\d+) packages installed.") + { + return $matches[1] -gt 0 + } + + $false } -if ($source -eq "webpi") +Function Choco-Upgrade { - # check whether 'webpi' installation source is available; if it isn't, install it - $webpi_check_cmd = "$executable list webpicmd -localonly" - $webpi_check_result = invoke-expression $webpi_check_cmd - Set-Attr $result "chocolatey_bootstrap_webpi_check_cmd" $webpi_check_cmd - Set-Attr $result "chocolatey_bootstrap_webpi_check_log" $webpi_check_result - if ( - ( - ($webpi_check_result.GetType().Name -eq "String") -and - ($webpi_check_result -match "No packages found") - ) -or - ($webpi_check_result -contains "No packages found.") + [CmdletBinding()] + + param( + [Parameter(Mandatory=$true, Position=1)] + [string]$package, + [Parameter(Mandatory=$false, Position=2)] + [string]$version, + [Parameter(Mandatory=$false, Position=3)] + [string]$source, + [Parameter(Mandatory=$false, Position=4)] + [bool]$force ) + + if (-not (Choco-IsInstalled $package)) { - #lessmsi is a webpicmd dependency, but dependency resolution fails unless it's installed separately - $lessmsi_install_cmd = "$executable install lessmsi" - $lessmsi_install_result = invoke-expression $lessmsi_install_cmd - Set-Attr $result "chocolatey_bootstrap_lessmsi_install_cmd" $lessmsi_install_cmd - Set-Attr $result "chocolatey_bootstrap_lessmsi_install_log" $lessmsi_install_result + 
throw "$package is not installed, you cannot upgrade" + } - $webpi_install_cmd = "$executable install webpicmd" - $webpi_install_result = invoke-expression $webpi_install_cmd - Set-Attr $result "chocolatey_bootstrap_webpi_install_cmd" $webpi_install_cmd - Set-Attr $result "chocolatey_bootstrap_webpi_install_log" $webpi_install_result + $cmd = "$executable upgrade -dv -y $package" - if (($webpi_install_result | select-string "already installed").length -gt 0) - { - #no change - } - elseif (($webpi_install_result | select-string "webpicmd has finished successfully").length -gt 0) + if ($version) + { + $cmd += " -version $version" + } + + if ($source) + { + $cmd += " -source $source" + } + + if ($force) + { + $cmd += " -force" + } + + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "choco_error_cmd" $cmd + Set-Attr $result "choco_error_log" "$results" + Throw "Error installing $package" + } + + if ("$results" -match ' upgraded (\d+)/\d+ package\(s\)\. ') + { + if ($matches[1] -gt 0) { $result.changed = $true } - Else - { - Fail-Json $result "WebPI install error: $webpi_install_result" - } } } -$expression = $executable -if ($state -eq "present") -{ - $expression += " install $package" -} -Elseif ($state -eq "absent") -{ - $expression += " uninstall $package" -} -if ($force) + +Function Choco-Install { - if ($state -eq "present") + [CmdletBinding()] + + param( + [Parameter(Mandatory=$true, Position=1)] + [string]$package, + [Parameter(Mandatory=$false, Position=2)] + [string]$version, + [Parameter(Mandatory=$false, Position=3)] + [string]$source, + [Parameter(Mandatory=$false, Position=4)] + [bool]$force, + [Parameter(Mandatory=$false, Position=5)] + [bool]$upgrade + ) + + if (Choco-IsInstalled $package) { - $expression += " -force" + if ($upgrade) + { + Choco-Upgrade -package $package -version $version -source $source -force $force + } + + return } -} -if ($version) -{ - $expression += " -version $version" -} -if ($source -eq 
"chocolatey") -{ - $expression += " -source https://chocolatey.org/api/v2/" -} -elseif (($source -eq "windowsfeatures") -or ($source -eq "webpi") -or ($source -eq "ruby")) -{ - $expression += " -source $source" -} -Set-Attr $result "chocolatey command" $expression -$op_result = invoke-expression $expression -if ($state -eq "present") -{ - if ( - (($op_result | select-string "already installed").length -gt 0) -or - # webpi has different text output, and that doesn't include the package name but instead the human-friendly name - (($op_result | select-string "No products to be installed").length -gt 0) - ) + $cmd = "$executable install -dv -y $package" + + if ($version) { - #no change + $cmd += " -version $version" } - elseif ( - (($op_result | select-string "has finished successfully").length -gt 0) -or - # webpi has different text output, and that doesn't include the package name but instead the human-friendly name - (($op_result | select-string "Install of Products: SUCCESS").length -gt 0) -or - (($op_result | select-string "gem installed").length -gt 0) -or - (($op_result | select-string "gems installed").length -gt 0) - ) + + if ($source) { - $result.changed = $true + $cmd += " -source $source" + } + + if ($force) + { + $cmd += " -force" } - Else + + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) { - Fail-Json $result "Install error: $op_result" + Set-Attr $result "choco_error_cmd" $cmd + Set-Attr $result "choco_error_log" "$results" + Throw "Error installing $package" } + + $result.changed = $true } -Elseif ($state -eq "absent") + +Function Choco-Uninstall { - $op_result = invoke-expression "$executable uninstall $package" - # HACK: Misleading - 'Uninstalling from folder' appears in output even when package is not installed, hence order of checks this way - if ( - (($op_result | select-string "not installed").length -gt 0) -or - (($op_result | select-string "Cannot find path").length -gt 0) + [CmdletBinding()] + + param( + 
[Parameter(Mandatory=$true, Position=1)] + [string]$package, + [Parameter(Mandatory=$false, Position=2)] + [string]$version, + [Parameter(Mandatory=$false, Position=3)] + [bool]$force ) + + if (-not (Choco-IsInstalled $package)) { - #no change + return } - elseif (($op_result | select-string "Uninstalling from folder").length -gt 0) + + $cmd = "$executable uninstall -dv -y $package" + + if ($version) { - $result.changed = $true + $cmd += " -version $version" } - else + + if ($force) { - Fail-Json $result "Uninstall error: $op_result" + $cmd += " -force" } + + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "choco_error_cmd" $cmd + Set-Attr $result "choco_error_log" "$results" + Throw "Error uninstalling $package" + } + + $result.changed = $true } +Try +{ + Chocolatey-Install-Upgrade + + if ($state -eq "present") + { + Choco-Install -package $package -version $version -source $source ` + -force $force -upgrade $upgrade + } + else + { + Choco-Uninstall -package $package -version $version -force $force + } -if ($showlog) + Exit-Json $result; +} +Catch { - Set-Attr $result "chocolatey_log" $op_result + Fail-Json $result $_.Exception.Message } -Set-Attr $result "chocolatey_success" "true" -Exit-Json $result; diff --git a/windows/win_chocolatey.py b/windows/win_chocolatey.py index 4df1f3c58e8..7f399dbd22f 100644 --- a/windows/win_chocolatey.py +++ b/windows/win_chocolatey.py @@ -53,40 +53,29 @@ options: - no default: no aliases: [] - version: + upgrade: description: - - Specific version of the package to be installed - - Ignored when state == 'absent' - required: false - default: null - aliases: [] - showlog: - description: - - Outputs the chocolatey log inside a chocolatey_log property. 
+ - If package is already installed it, try to upgrade to the latest version or to the specified version required: false choices: - yes - no default: no aliases: [] - source: + version: description: - - Which source to install from - require: false - choices: - - chocolatey - - ruby - - webpi - - windowsfeatures - default: chocolatey + - Specific version of the package to be installed + - Ignored when state == 'absent' + required: false + default: null aliases: [] - logPath: + source: description: - - Where to log command output to + - Specify source rather than using default chocolatey repository require: false - default: c:\\ansible-playbook.log + default: null aliases: [] -author: Trond Hindenes, Peter Mounce +author: "Trond Hindenes (@trondhindenes), Peter Mounce (@petemounce), Pepe Barbe (@elventear), Adam Keech (@smadam813)" ''' # TODO: @@ -109,10 +98,8 @@ EXAMPLES = ''' name: git state: absent - # Install Application Request Routing v3 from webpi - # Logically, this requires that you install IIS first (see win_feature) - # To find a list of packages available via webpi source, `choco list -source webpi` + # Install git from specified repository win_chocolatey: - name: ARRv3 - source: webpi + name: git + source: https://someserver/api/v2/ ''' diff --git a/windows/win_environment.ps1 b/windows/win_environment.ps1 new file mode 100644 index 00000000000..1398524cfbb --- /dev/null +++ b/windows/win_environment.ps1 @@ -0,0 +1,69 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2015, Jon Hawkesworth (@jhawkesworth) +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; +$result = New-Object PSObject; +Set-Attr $result "changed" $false; + +If ($params.state) { + $state = $params.state.ToString().ToLower() + If (($state -ne 'present') -and ($state -ne 'absent') ) { + Fail-Json $result "state is '$state'; must be 'present', or 'absent'" + } +} else { + $state = 'present' +} + +If ($params.name) +{ + $name = $params.name +} else { + Fail-Json $result "missing required argument: name" +} + +$value = $params.value + +If ($params.level) { + $level = $params.level.ToString().ToLower() + If (( $level -ne 'machine') -and ( $level -ne 'user' ) -and ( $level -ne 'process')) { + Fail-Json $result "level is '$level'; must be 'machine', 'user', or 'process'" + } +} + +$before_value = [Environment]::GetEnvironmentVariable($name, $level) + +if ($state -eq "present" ) { + [Environment]::SetEnvironmentVariable($name, $value, $level) +} Elseif ($state -eq "absent") { + [Environment]::SetEnvironmentVariable($name, $null, $level) +} + +$after_value = [Environment]::GetEnvironmentVariable($name, $level) + +Set-Attr $result "name" $name; +Set-Attr $result "before_value" $before_value; +Set-Attr $result "value" $after_value; +Set-Attr $result "level" $level; +if ($before_value -ne $after_value) { + Set-Attr $result "changed" $true; +} + +Exit-Json $result; diff --git a/windows/win_environment.py b/windows/win_environment.py new file mode 100644 index 00000000000..8d4a1701695 --- /dev/null +++ b/windows/win_environment.py @@ -0,0 +1,80 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Jon Hawkesworth (@jhawkesworth) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software 
Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +DOCUMENTATION = ''' +--- +module: win_environment +version_added: "2.0" +short_description: Modifies environment variables on windows guests +description: + - Uses .net Environment to set or remove environment variables. + - Can set at User, Machine or Process level. + - Note that usual rules apply, so existing environments will not change until new processes are started. +options: + state: + description: + - present to ensure environment variable is set, or absent to ensure it is removed + required: false + default: present + choices: + - present + - absent + name: + description: + - The name of the environment variable + required: true + default: no default + value: + description: + - The value to store in the environment variable. Can be omitted for state=absent + required: false + default: no default + level: + description: + - The level at which to set the environment variable. + - Use 'machine' to set for all users. + - Use 'user' to set for the current user that ansible is connected as. + - Use 'process' to set for the current process. Probably not that useful. 
+ required: true + default: no default + choices: + - machine + - process + - user +author: "Jon Hawkesworth (@jhawkesworth)" +''' + +EXAMPLES = ''' + # Set an environment variable for all users + win_environment: + state: present + name: TestVariable + value: "Test value" + level: machine + # Remove an environment variable for the current users + win_environment: + state: absent + name: TestVariable + level: user +''' + diff --git a/windows/win_iis_virtualdirectory.ps1 b/windows/win_iis_virtualdirectory.ps1 new file mode 100644 index 00000000000..3f2ab692b42 --- /dev/null +++ b/windows/win_iis_virtualdirectory.ps1 @@ -0,0 +1,128 @@ +#!powershell +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# Site +$site = Get-Attr $params "site" $FALSE; +If ($site -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: site"; +} + +# Application +$application = Get-Attr $params "application" $FALSE; + +# State parameter +$state = Get-Attr $params "state" "present"; +If (($state -ne 'present') -and ($state -ne 'absent')) { + Fail-Json $result "state is '$state'; must be 'present' or 'absent'" +} + +# Path parameter +$physical_path = Get-Attr $params "physical_path" $FALSE; + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) { + Import-Module WebAdministration +} + +# Result +$result = New-Object psobject @{ + directory = New-Object psobject + changed = $false +}; + +# Construct path +$directory_path = if($application) { + "IIS:\Sites\$($site)\$($application)\$($name)" +} else { + "IIS:\Sites\$($site)\$($name)" +} + +# Directory info +$directory = Get-WebVirtualDirectory -Site $site -Name $name + +try { + # Add directory + If(($state -eq 'present') -and (-not $directory)) { + If ($physical_path -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required arguments: physical_path" + } + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $directory_parameters = New-Object psobject @{ + Site = $site + Name = $name + PhysicalPath = $physical_path + }; + + If ($application) { + $directory_parameters.Application = $application + } + + $directory = New-WebVirtualDirectory @directory_parameters -Force + $result.changed = $true + } + + # Remove directory + If ($state -eq 'absent' -and $directory) { + Remove-Item $directory_path + $result.changed = $true + } + + $directory = 
Get-WebVirtualDirectory -Site $site -Name $name + If($directory) { + + # Change Physical Path if needed + if($physical_path) { + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $vdir_folder = Get-Item $directory.PhysicalPath + $folder = Get-Item $physical_path + If($folder.FullName -ne $vdir_folder.FullName) { + Set-ItemProperty $directory_path -name physicalPath -value $physical_path + $result.changed = $true + } + } + } +} catch { + Fail-Json $result $_.Exception.Message +} + +# Result +$directory = Get-WebVirtualDirectory -Site $site -Name $name +$result.directory = New-Object psobject @{ + PhysicalPath = $directory.PhysicalPath +} + +Exit-Json $result diff --git a/windows/win_iis_virtualdirectory.py b/windows/win_iis_virtualdirectory.py new file mode 100644 index 00000000000..e5bbd950007 --- /dev/null +++ b/windows/win_iis_virtualdirectory.py @@ -0,0 +1,67 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: win_iis_virtualdirectory +version_added: "2.0" +short_description: Configures a IIS virtual directories. 
+description: + - Creates, Removes and configures a IIS Web site +options: + name: + description: + - The name of the virtual directory to create or remove + required: true + default: null + aliases: [] + state: + description: + - Whether to add or remove the specified virtual directory + choices: + - absent + - present + required: false + default: null + aliases: [] + site: + description: + - The site name under which the virtual directory is created or exists. + required: false + default: null + aliases: [] + application: + description: + - The application under which the virtual directory is created or exists. + required: false + default: null + aliases: [] + physical_path: + description: + - The physical path to the folder in which the new virtual directory is created. The specified folder must already exist. + required: false + default: null + aliases: [] +author: Henrik Wallström +''' + +EXAMPLES = ''' + +''' diff --git a/windows/win_iis_webapplication.ps1 b/windows/win_iis_webapplication.ps1 new file mode 100644 index 00000000000..e576dd5081c --- /dev/null +++ b/windows/win_iis_webapplication.ps1 @@ -0,0 +1,132 @@ +#!powershell + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# Site +$site = Get-Attr $params "site" $FALSE; +If ($site -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: site"; +} + +# State parameter +$state = Get-Attr $params "state" "present"; +$state.ToString().ToLower(); +If (($state -ne 'present') -and ($state -ne 'absent')) { + Fail-Json $result "state is '$state'; must be 'present' or 'absent'" +} + +# Path parameter +$physical_path = Get-Attr $params "physical_path" $FALSE; + +# Application Pool Parameter +$application_pool = Get-Attr $params "application_pool" $FALSE; + + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) { + Import-Module WebAdministration +} + +# Result +$result = New-Object psobject @{ + application = New-Object psobject + changed = $false +}; + +# Application info +$application = Get-WebApplication -Site $site -Name $name + +try { + # Add application + If(($state -eq 'present') -and (-not $application)) { + If ($physical_path -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required arguments: physical_path" + } + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $application_parameters = New-Object psobject @{ + Site = $site + Name = $name + PhysicalPath = $physical_path + }; + + If ($application_pool) { + $application_parameters.ApplicationPool = $application_pool + } + + $application = New-WebApplication @application_parameters -Force + $result.changed = $true + + } + + # Remove application + if ($state -eq 'absent' -and $application) { + $application = Remove-WebApplication -Site $site -Name $name + $result.changed = $true + } + + $application = Get-WebApplication -Site $site 
-Name $name + If($application) { + + # Change Physical Path if needed + if($physical_path) { + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $app_folder = Get-Item $application.PhysicalPath + $folder = Get-Item $physical_path + If($folder.FullName -ne $app_folder.FullName) { + Set-ItemProperty "IIS:\Sites\$($site)\$($name)" -name physicalPath -value $physical_path + $result.changed = $true + } + } + + # Change Application Pool if needed + if($application_pool) { + If($application_pool -ne $application.applicationPool) { + Set-ItemProperty "IIS:\Sites\$($site)\$($name)" -name applicationPool -value $application_pool + $result.changed = $true + } + } + } +} catch { + Fail-Json $result $_.Exception.Message +} + +# Result +$application = Get-WebApplication -Site $site -Name $name +$result.application = New-Object psobject @{ + PhysicalPath = $application.PhysicalPath + ApplicationPool = $application.applicationPool +} + +Exit-Json $result diff --git a/windows/win_iis_webapplication.py b/windows/win_iis_webapplication.py new file mode 100644 index 00000000000..b8ebd085162 --- /dev/null +++ b/windows/win_iis_webapplication.py @@ -0,0 +1,68 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: win_iis_webapplication +version_added: "2.0" +short_description: Configures an IIS Web application. +description: + - Creates, Removes and configures an IIS Web application +options: + name: + description: + - Name of the Web application + required: true + default: null + aliases: [] + site: + description: + - Name of the site on which the application is created. + required: true + default: null + aliases: [] + state: + description: + - State of the web application + choices: + - present + - absent + required: false + default: null + aliases: [] + physical_path: + description: + - The physical path on the remote host to use for the new application. The specified folder must already exist. + required: false + default: null + aliases: [] + application_pool: + description: + - The application pool in which the new site executes. + required: false + default: null + aliases: [] +author: Henrik Wallström +''' + +EXAMPLES = ''' +$ ansible -i hosts -m win_iis_webapplication -a "name=api site=acme physical_path=c:\\apps\\acme\\api" host + +''' diff --git a/windows/win_iis_webapppool.ps1 b/windows/win_iis_webapppool.ps1 new file mode 100644 index 00000000000..2ed369e4a3f --- /dev/null +++ b/windows/win_iis_webapppool.ps1 @@ -0,0 +1,112 @@ +#!powershell + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# State parameter +$state = Get-Attr $params "state" $FALSE; +$valid_states = ('started', 'restarted', 'stopped', 'absent'); +If (($state -Ne $FALSE) -And ($state -NotIn $valid_states)) { + Fail-Json $result "state is '$state'; must be $($valid_states)" +} + +# Attributes parameter - Pipe separated list of attributes where +# keys and values are separated by comma (paramA:valyeA|paramB:valueB) +$attributes = @{}; +If ($params.attributes) { + $params.attributes -split '\|' | foreach { + $key, $value = $_ -split "\:"; + $attributes.Add($key, $value); + } +} + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $NULL){ + Import-Module WebAdministration +} + +# Result +$result = New-Object psobject @{ + changed = $FALSE + attributes = $attributes +}; + +# Get pool +$pool = Get-Item IIS:\AppPools\$name + +try { + # Add + if (-not $pool -and $state -in ('started', 'stopped', 'restarted')) { + New-WebAppPool $name + $result.changed = $TRUE + } + + # Remove + if ($pool -and $state -eq 'absent') { + Remove-WebAppPool $name + $result.changed = $TRUE + } + + $pool = Get-Item IIS:\AppPools\$name + if($pool) { + # Set properties + $attributes.GetEnumerator() | foreach { + $newParameter = $_; + $currentParameter = Get-ItemProperty ("IIS:\AppPools\" + $name) $newParameter.Key + if(-not $currentParameter -or ($currentParameter.Value -as [String]) -ne $newParameter.Value) { + Set-ItemProperty ("IIS:\AppPools\" + $name) $newParameter.Key $newParameter.Value + $result.changed = $TRUE + } + } + + # Set run state + if (($state -eq 'stopped') -and ($pool.State -eq 'Started')) { + Stop-WebAppPool -Name $name -ErrorAction Stop + $result.changed = $TRUE + } + if ((($state -eq 'started') -and 
($pool.State -eq 'Stopped')) -or ($state -eq 'restarted')) { + Start-WebAppPool -Name $name -ErrorAction Stop + $result.changed = $TRUE + } + } +} catch { + Fail-Json $result $_.Exception.Message +} + +# Result +$pool = Get-Item IIS:\AppPools\$name +$result.info = @{ + name = $pool.Name + state = $pool.State + attributes = New-Object psobject @{} +}; + +$pool.Attributes | ForEach { $result.info.attributes.Add($_.Name, $_.Value)}; + +Exit-Json $result diff --git a/windows/win_iis_webapppool.py b/windows/win_iis_webapppool.py new file mode 100644 index 00000000000..c77c3b04cb7 --- /dev/null +++ b/windows/win_iis_webapppool.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION = ''' +--- +module: win_iis_webapppool +version_added: "2.0" +short_description: Configures a IIS Web Application Pool. +description: + - Creates, Removes and configures a IIS Web Application Pool +options: + name: + description: + - Names of application pool + required: true + default: null + aliases: [] + state: + description: + - State of the binding + choices: + - absent + - stopped + - started + - restarted + required: false + default: null + aliases: [] + attributes: + description: + - Application Pool attributes from string where attributes are seperated by a pipe and attribute name/values by colon Ex. 
"foo:1|bar:2" + required: false + default: null + aliases: [] +author: Henrik Wallström +''' + +EXAMPLES = ''' +# This returns information about an existing application pool +$ansible -i inventory -m win_iis_webapppool -a "name='DefaultAppPool'" windows +host | success >> { + "attributes": {}, + "changed": false, + "info": { + "attributes": { + "CLRConfigFile": "", + "applicationPoolSid": "S-1-5-82-3006700770-424185619-1745488364-794895919-4004696415", + "autoStart": true, + "enable32BitAppOnWin64": false, + "enableConfigurationOverride": true, + "managedPipelineMode": 0, + "managedRuntimeLoader": "webengine4.dll", + "managedRuntimeVersion": "v4.0", + "name": "DefaultAppPool", + "passAnonymousToken": true, + "queueLength": 1000, + "startMode": 0, + "state": 1 + }, + "name": "DefaultAppPool", + "state": "Started" + } +} + +# This creates a new application pool in 'Started' state +$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=started" windows + +# This stops an application pool +$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=stopped" windows + +# This restarts an application pool +$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=restarted" windows + +# This restarts an application pool +$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=restarted" windows + +# This changes application pool attributes without touching state +$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows + +# This creates an application pool and sets attributes +$ ansible -i inventory -m win_iis_webapppool -a "name='AnotherAppPool' state=started attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows + + +# Playbook example +--- + +- name: App Pool with .NET 4.0 + win_iis_webapppool: + name: 'AppPool' + state: started + attributes: managedRuntimeVersion:v4.0 + register: webapppool + +''' diff --git a/windows/win_iis_webbinding.ps1 
b/windows/win_iis_webbinding.ps1 new file mode 100644 index 00000000000..bdff43fc63c --- /dev/null +++ b/windows/win_iis_webbinding.ps1 @@ -0,0 +1,138 @@ +#!powershell + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# State parameter +$state = Get-Attr $params "state" $FALSE; +$valid_states = ($FALSE, 'present', 'absent'); +If ($state -NotIn $valid_states) { + Fail-Json $result "state is '$state'; must be $($valid_states)" +} + +$binding_parameters = New-Object psobject @{ + Name = $name +}; + +If ($params.host_header) { + $binding_parameters.HostHeader = $params.host_header +} + +If ($params.protocol) { + $binding_parameters.Protocol = $params.protocol +} + +If ($params.port) { + $binding_parameters.Port = $params.port +} + +If ($params.ip) { + $binding_parameters.IPAddress = $params.ip +} + +$certificateHash = Get-Attr $params "certificate_hash" $FALSE; +$certificateStoreName = Get-Attr $params "certificate_store_name" "MY"; + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null){ + Import-Module WebAdministration +} + +function Create-Binding-Info { + return 
New-Object psobject @{ + "bindingInformation" = $args[0].bindingInformation + "certificateHash" = $args[0].certificateHash + "certificateStoreName" = $args[0].certificateStoreName + "isDsMapperEnabled" = $args[0].isDsMapperEnabled + "protocol" = $args[0].protocol + "sslFlags" = $args[0].sslFlags + } +} + +# Result +$result = New-Object psobject @{ + changed = $false + parameters = $binding_parameters + matched = @() + removed = @() + added = @() +}; + +# Get bindings matching parameters +$curent_bindings = Get-WebBinding @binding_parameters +$curent_bindings | Foreach { + $result.matched += Create-Binding-Info $_ +} + +try { + # Add + if (-not $curent_bindings -and $state -eq 'present') { + New-WebBinding @binding_parameters -Force + + # Select certificat + if($certificateHash -ne $FALSE) { + + $ip = $binding_parameters.IPAddress + if((!$ip) -or ($ip -eq "*")) { + $ip = "0.0.0.0" + } + + $port = $binding_parameters.Port + if(!$port) { + $port = 443 + } + + $result.port = $port + $result.ip = $ip + + Push-Location IIS:\SslBindings\ + Get-Item Cert:\LocalMachine\$certificateStoreName\$certificateHash | New-Item "$($ip)!$($port)" + Pop-Location + } + + $result.added += Create-Binding-Info (Get-WebBinding @binding_parameters) + $result.changed = $true + } + + # Remove + if ($curent_bindings -and $state -eq 'absent') { + $curent_bindings | foreach { + Remove-WebBinding -InputObject $_ + $result.removed += Create-Binding-Info $_ + } + $result.changed = $true + } + + +} +catch { + Fail-Json $result $_.Exception.Message +} + +Exit-Json $result diff --git a/windows/win_iis_webbinding.py b/windows/win_iis_webbinding.py new file mode 100644 index 00000000000..061bed73723 --- /dev/null +++ b/windows/win_iis_webbinding.py @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as 
published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION = ''' +--- +module: win_iis_webbinding +version_added: "2.0" +short_description: Configures a IIS Web site. +description: + - Creates, Removes and configures a binding to an existing IIS Web site +options: + name: + description: + - Names of web site + required: true + default: null + aliases: [] + state: + description: + - State of the binding + choices: + - present + - absent + required: false + default: null + aliases: [] + port: + description: + - The port to bind to / use for the new site. + required: false + default: null + aliases: [] + ip: + description: + - The IP address to bind to / use for the new site. + required: false + default: null + aliases: [] + host_header: + description: + - The host header to bind to / use for the new site. + required: false + default: null + aliases: [] + protocol: + description: + - The protocol to be used for the Web binding (usually HTTP, HTTPS, or FTP). + required: false + default: null + aliases: [] + protocol: + description: + - The protocol to be used for the Web binding (usually HTTP, HTTPS, or FTP). + required: false + default: null + aliases: [] + certificate_hash: + description: + - Certificate hash for the SSL binding. The certificate hash is the unique identifier for the certificate. + required: false + default: null + aliases: [] + certificate_store_name: + description: + - Name of the certificate store where the certificate for the binding is located. 
+ required: false + default: "My" + aliases: [] +author: Henrik Wallström +''' + +EXAMPLES = ''' +# This will return binding information for an existing host +$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site'" windows +host | success >> { + "added": [], + "changed": false, + "matched": [ + { + "bindingInformation": "*:80:", + "certificateHash": "", + "certificateStoreName": "", + "isDsMapperEnabled": false, + "protocol": "http", + "sslFlags": 0 + } + ], + "parameters": { + "Name": "Default Web Site" + }, + "removed": [] +} + +# This will return the HTTPS binding information for an existing host +$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https" windows + +# This will return the HTTPS binding information for an existing host +$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090 state=present" windows + +# This will add a HTTP binding on port 9090 +$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090 state=present" windows + +# This will remove the HTTP binding on port 9090 +$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090 state=absent" windows + +# This will add a HTTPS binding +$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https state=present" windows + +# This will add a HTTPS binding and select certificate to use +# ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https certificate_hash=B0D0FA8408FC67B230338FCA584D03792DA73F4C" windows + + +# Playbook example +--- + +- name: Website http/https bindings + win_iis_webbinding: + name: "Default Web Site" + protocol: https + port: 443 + certificate_hash: "D1A3AF8988FD32D1A3AF8988FD323792DA73F4C" + state: present + when: monitor_use_https + +''' diff --git a/windows/win_iis_website.ps1 b/windows/win_iis_website.ps1 new file mode 100644 index 
00000000000..bba1e941142 --- /dev/null +++ b/windows/win_iis_website.ps1 @@ -0,0 +1,179 @@ +#!powershell + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# State parameter +$state = Get-Attr $params "state" $FALSE; +$state.ToString().ToLower(); +If (($state -ne $FALSE) -and ($state -ne 'started') -and ($state -ne 'stopped') -and ($state -ne 'restarted') -and ($state -ne 'absent')) { + Fail-Json (New-Object psobject) "state is '$state'; must be 'started', 'restarted', 'stopped' or 'absent'" +} + +# Path parameter +$physical_path = Get-Attr $params "physical_path" $FALSE; + +# Application Pool Parameter +$application_pool = Get-Attr $params "application_pool" $FALSE; + +# Binding Parameters +$bind_port = Get-Attr $params "port" $FALSE; +$bind_ip = Get-Attr $params "ip" $FALSE; +$bind_hostname = Get-Attr $params "hostname" $FALSE; +$bind_ssl = Get-Attr $params "ssl" $FALSE; + +# Custom site Parameters from string where properties +# are seperated by a pipe and property name/values by colon. +# Ex. 
"foo:1|bar:2" +$parameters = Get-Attr $params "parameters" $null; +if($parameters -ne $null) { + $parameters = @($parameters -split '\|' | ForEach { + return ,($_ -split "\:", 2); + }) +} + + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) { + Import-Module WebAdministration +} + +# Result +$result = New-Object psobject @{ + site = New-Object psobject + changed = $false +}; + +# Site info +$site = Get-Website -Name $name + +Try { + # Add site + If(($state -ne 'absent') -and (-not $site)) { + If ($physical_path -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required arguments: physical_path" + } + ElseIf (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $site_parameters = New-Object psobject @{ + Name = $name + PhysicalPath = $physical_path + }; + + If ($application_pool) { + $site_parameters.ApplicationPool = $application_pool + } + + If ($bind_port) { + $site_parameters.Port = $bind_port + } + + If ($bind_ip) { + $site_parameters.IPAddress = $bind_ip + } + + If ($bind_hostname) { + $site_parameters.HostHeader = $bind_hostname + } + + $site = New-Website @site_parameters -Force + $result.changed = $true + } + + # Remove site + If ($state -eq 'absent' -and $site) { + $site = Remove-Website -Name $name + $result.changed = $true + } + + $site = Get-Website -Name $name + If($site) { + # Change Physical Path if needed + if($physical_path) { + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $folder = Get-Item $physical_path + If($folder.FullName -ne $site.PhysicalPath) { + Set-ItemProperty "IIS:\Sites\$($site.Name)" -name physicalPath -value $folder.FullName + $result.changed = $true + } + } + + # Change Application Pool if needed + if($application_pool) { + If($application_pool -ne $site.applicationPool) { + 
Set-ItemProperty "IIS:\Sites\$($site.Name)" -name applicationPool -value $application_pool + $result.changed = $true + } + } + + # Set properties + if($parameters) { + $parameters | foreach { + $parameter_value = Get-ItemProperty "IIS:\Sites\$($site.Name)" $_[0] + if((-not $parameter_value) -or ($parameter_value.Value -as [String]) -ne $_[1]) { + Set-ItemProperty "IIS:\Sites\$($site.Name)" $_[0] $_[1] + $result.changed = $true + } + } + } + + # Set run state + if (($state -eq 'stopped') -and ($site.State -eq 'Started')) + { + Stop-Website -Name $name -ErrorAction Stop + $result.changed = $true + } + if ((($state -eq 'started') -and ($site.State -eq 'Stopped')) -or ($state -eq 'restarted')) + { + Start-Website -Name $name -ErrorAction Stop + $result.changed = $true + } + } +} +Catch +{ + Fail-Json (New-Object psobject) $_.Exception.Message +} + +$site = Get-Website -Name $name +$result.site = New-Object psobject @{ + Name = $site.Name + ID = $site.ID + State = $site.State + PhysicalPath = $site.PhysicalPath + ApplicationPool = $site.applicationPool + Bindings = @($site.Bindings.Collection | ForEach-Object { $_.BindingInformation }) +} + + +Exit-Json $result diff --git a/windows/win_iis_website.py b/windows/win_iis_website.py new file mode 100644 index 00000000000..8921afe5970 --- /dev/null +++ b/windows/win_iis_website.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: win_iis_website +version_added: "2.0" +short_description: Configures a IIS Web site. +description: + - Creates, Removes and configures a IIS Web site +options: + name: + description: + - Names of web site + required: true + default: null + aliases: [] + state: + description: + - State of the web site + choices: + - started + - restarted + - stopped + - absent + required: false + default: null + aliases: [] + physical_path: + description: + - The physical path on the remote host to use for the new site. The specified folder must already exist. + required: false + default: null + aliases: [] + application_pool: + description: + - The application pool in which the new site executes. + required: false + default: null + aliases: [] + port: + description: + - The port to bind to / use for the new site. + required: false + default: null + aliases: [] + ip: + description: + - The IP address to bind to / use for the new site. + required: false + default: null + aliases: [] + hostname: + description: + - The host header to bind to / use for the new site. + required: false + default: null + aliases: [] + ssl: + description: + - Enables HTTPS binding on the site.. + required: false + default: null + aliases: [] + parameters: + description: + - Custom site Parameters from string where properties are seperated by a pipe and property name/values by colon Ex. 
"foo:1|bar:2" + required: false + default: null + aliases: [] +author: Henrik Wallström +''' + +EXAMPLES = ''' +# This return information about an existing host +$ ansible -i vagrant-inventory -m win_iis_website -a "name='Default Web Site'" window +host | success >> { + "changed": false, + "site": { + "ApplicationPool": "DefaultAppPool", + "Bindings": [ + "*:80:" + ], + "ID": 1, + "Name": "Default Web Site", + "PhysicalPath": "%SystemDrive%\\inetpub\\wwwroot", + "State": "Stopped" + } +} + +# This stops an existing site. +$ ansible -i hosts -m win_iis_website -a "name='Default Web Site' state=stopped" host + +# This creates a new site. +$ ansible -i hosts -m win_iis_website -a "name=acme physical_path=c:\\sites\\acme" host + +# Change logfile . +$ ansible -i hosts -m win_iis_website -a "name=acme physical_path=c:\\sites\\acme" host + + +# Playbook example +--- + +- name: Acme IIS site + win_iis_website: + name: "Acme" + state: started + port: 80 + ip: 127.0.0.1 + hostname: acme.local + application_pool: "acme" + physical_path: 'c:\\sites\\acme' + parameters: 'logfile.directory:c:\\sites\\logs' + register: website + +''' diff --git a/windows/win_scheduled_task.ps1 b/windows/win_scheduled_task.ps1 new file mode 100644 index 00000000000..2f802f59cd0 --- /dev/null +++ b/windows/win_scheduled_task.ps1 @@ -0,0 +1,74 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2015, Peter Mounce +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +$ErrorActionPreference = "Stop" + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; +$result = New-Object PSObject; +Set-Attr $result "changed" $false; + +if ($params.name) +{ + $name = $params.name +} +else +{ + Fail-Json $result "missing required argument: name" +} +if ($params.enabled) +{ + $enabled = $params.enabled | ConvertTo-Bool +} +else +{ + $enabled = $true +} +$target_state = @{$true = "Enabled"; $false="Disabled"}[$enabled] + +try +{ + $tasks = Get-ScheduledTask -TaskPath $name + $tasks_needing_changing = $tasks |? { $_.State -ne $target_state } + if (-not($tasks_needing_changing -eq $null)) + { + if ($enabled) + { + $tasks_needing_changing | Enable-ScheduledTask + } + else + { + $tasks_needing_changing | Disable-ScheduledTask + } + Set-Attr $result "tasks_changed" ($tasks_needing_changing | foreach { $_.TaskPath + $_.TaskName }) + $result.changed = $true + } + else + { + Set-Attr $result "tasks_changed" @() + $result.changed = $false + } + + Exit-Json $result; +} +catch +{ + Fail-Json $result $_.Exception.Message +} diff --git a/windows/win_scheduled_task.py b/windows/win_scheduled_task.py new file mode 100644 index 00000000000..2c5867402c5 --- /dev/null +++ b/windows/win_scheduled_task.py @@ -0,0 +1,51 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Peter Mounce +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +DOCUMENTATION = ''' +--- +module: win_scheduled_task +version_added: "2.0" +short_description: Manage scheduled tasks +description: + - Manage scheduled tasks +options: + name: + description: + - Name of the scheduled task + - Supports * as wildcard + required: true + enabled: + description: + - State that the task should become + required: false + choices: + - yes + - no + default: yes +author: Peter Mounce +''' + +EXAMPLES = ''' + # Disable the scheduled tasks with "WindowsUpdate" in their name + win_scheduled_task: name="*WindowsUpdate*" enabled=no +''' diff --git a/windows/win_updates.py b/windows/win_updates.py index 7eefd8ba331..4a9f055d8dc 100644 --- a/windows/win_updates.py +++ b/windows/win_updates.py @@ -41,7 +41,7 @@ options: - (anything that is a valid update category) default: critical aliases: [] -author: Peter Mounce +author: "Peter Mounce (@petemounce)" ''' EXAMPLES = '''