From 5f870b094b4e682c654ff6c298f4dd3b9e5dd486 Mon Sep 17 00:00:00 2001 From: Timothy Vandenbrande Date: Fri, 21 Nov 2014 14:26:47 +0100 Subject: [PATCH 001/157] added a source/network add/remove to/from zone for firewalld --- system/firewalld.py | 55 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/system/firewalld.py b/system/firewalld.py index 22db165aad3..ec4be051c9e 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -41,6 +41,11 @@ options: - "Rich rule to add/remove to/from firewalld" required: false default: null + source: + description: + - 'The source/network you would like to add/remove to/from firewalld' + required: false + default: null zone: description: - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).' @@ -73,6 +78,7 @@ EXAMPLES = ''' - firewalld: port=8081/tcp permanent=true state=disabled - firewalld: zone=dmz service=http permanent=true state=enabled - firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled +- firewalld: source='192.168.1.0/24' zone=internal state=enabled ''' import os @@ -128,7 +134,29 @@ def set_port_disabled_permanent(zone, port, protocol): fw_settings = fw_zone.getSettings() fw_settings.removePort(port, protocol) fw_zone.update(fw_settings) - + +#################### +# source handling +# +def get_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + if source in fw_settings.getSources(): + return True + else: + return False + +def add_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.addSource(source) + fw_zone.update(fw_settings) + +def remove_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.removeSource(source) + fw_zone.update(fw_settings) #################### # service handling @@ -210,12 +238,15 @@ def main(): port=dict(required=False,default=None), rich_rule=dict(required=False,default=None), zone=dict(required=False,default=None), - permanent=dict(type='bool',required=True), + source=dict(required=False,default=None), + permanent=dict(type='bool',required=False,default=None), state=dict(choices=['enabled', 'disabled'], required=True), timeout=dict(type='int',required=False,default=0), ), supports_check_mode=True ) + if module.params['source'] == None and module.params['permanent'] == None: + module.fail(msg='permanent is a required parameter') ## Pre-run version checking if FW_VERSION < "0.2.11": @@ -226,6 +257,7 @@ def main(): msgs = [] service = module.params['service'] rich_rule = module.params['rich_rule'] + source = module.params['source'] if module.params['port'] != None: port, protocol = module.params['port'].split('/') @@ -304,6 +336,25 @@ def main(): if changed == True: msgs.append("Changed service %s to %s" % (service, desired_state)) + if source != None: + is_enabled = get_source(zone, source) + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + add_source(zone, source) + changed=True + msgs.append("Added %s to zone %s" % (source, zone)) + elif desired_state == "disabled": + if is_enabled == True: + msgs.append("source is present") + if module.check_mode: + 
module.exit_json(changed=True) + + remove_source(zone, source) + changed=True + msgs.append("Removed %s from zone %s" % (source, zone)) if port != None: if permanent: is_enabled = get_port_enabled_permanent(zone, [port, protocol]) From d6fbfdefd5ced3c8db63f0bef14900a816fddb5b Mon Sep 17 00:00:00 2001 From: Timothy Vandenbrande Date: Fri, 21 Nov 2014 15:39:07 +0100 Subject: [PATCH 002/157] added a source/network add/remove to/from zone for firewalld - removed useless comment --- system/firewalld.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/system/firewalld.py b/system/firewalld.py index ec4be051c9e..ed49f0860be 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -150,13 +150,11 @@ def add_source(zone, source): fw_zone = fw.config().getZoneByName(zone) fw_settings = fw_zone.getSettings() fw_settings.addSource(source) - fw_zone.update(fw_settings) def remove_source(zone, source): fw_zone = fw.config().getZoneByName(zone) fw_settings = fw_zone.getSettings() fw_settings.removeSource(source) - fw_zone.update(fw_settings) #################### # service handling @@ -348,7 +346,6 @@ def main(): msgs.append("Added %s to zone %s" % (source, zone)) elif desired_state == "disabled": if is_enabled == True: - msgs.append("source is present") if module.check_mode: module.exit_json(changed=True) From 6fab8f49a965c708be9ac2290c074d050d6a6832 Mon Sep 17 00:00:00 2001 From: Timothy Vandenbrande Date: Fri, 21 Nov 2014 14:26:47 +0100 Subject: [PATCH 003/157] added a source/network add/remove to/from zone for firewalld --- system/firewalld.py | 55 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/system/firewalld.py b/system/firewalld.py index dedc9260740..ace5e5fd1e4 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -41,6 +41,11 @@ options: - "Rich rule to add/remove to/from firewalld" required: false default: null + source: + description: + - 'The source/network you would like to add/remove to/from firewalld' + required: false + default: null zone: description: - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).' 
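As a usage sketch for the source option documented in the hunk above (a hypothetical snippet, not part of the patch: the network and zone values are placeholders), tasks use the same key=value form as the existing firewalld examples, and state=disabled removes a previously added network from the zone:

# illustrative only; mirrors the example added to EXAMPLES in this patch
- firewalld: source='192.168.1.0/24' zone=internal state=enabled
- firewalld: source='192.168.1.0/24' zone=internal state=disabled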
@@ -77,6 +82,7 @@ EXAMPLES = ''' - firewalld: port=8081/tcp permanent=true state=disabled - firewalld: zone=dmz service=http permanent=true state=enabled - firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled +- firewalld: source='192.168.1.0/24' zone=internal state=enabled ''' import os @@ -132,7 +138,29 @@ def set_port_disabled_permanent(zone, port, protocol): fw_settings = fw_zone.getSettings() fw_settings.removePort(port, protocol) fw_zone.update(fw_settings) - + +#################### +# source handling +# +def get_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + if source in fw_settings.getSources(): + return True + else: + return False + +def add_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.addSource(source) + fw_zone.update(fw_settings) + +def remove_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.removeSource(source) + fw_zone.update(fw_settings) #################### # service handling @@ -214,13 +242,16 @@ def main(): port=dict(required=False,default=None), rich_rule=dict(required=False,default=None), zone=dict(required=False,default=None), - permanent=dict(type='bool',required=True), immediate=dict(type='bool',default=False), + source=dict(required=False,default=None), + permanent=dict(type='bool',required=False,default=None), state=dict(choices=['enabled', 'disabled'], required=True), timeout=dict(type='int',required=False,default=0), ), supports_check_mode=True ) + if module.params['source'] == None and module.params['permanent'] == None: + module.fail(msg='permanent is a required parameter') ## Pre-run version checking if FW_VERSION < "0.2.11": @@ -231,6 +262,7 @@ def main(): msgs = [] service = module.params['service'] rich_rule = module.params['rich_rule'] + source = module.params['source'] if module.params['port'] != None: port, protocol = module.params['port'].split('/') @@ -310,6 +342,25 @@ def main(): if changed == True: msgs.append("Changed service %s to %s" % (service, desired_state)) + if source != None: + is_enabled = get_source(zone, source) + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + add_source(zone, source) + changed=True + msgs.append("Added %s to zone %s" % (source, zone)) + elif desired_state == "disabled": + if is_enabled == True: + msgs.append("source is present") + if module.check_mode: + module.exit_json(changed=True) + + remove_source(zone, source) + changed=True + msgs.append("Removed %s from zone %s" % (source, zone)) if port != None: if permanent: is_enabled = get_port_enabled_permanent(zone, [port, protocol]) From b365fc44645a4d81b7e7780708a4b7dd24faf1ce Mon Sep 17 00:00:00 2001 From: Timothy Vandenbrande Date: Fri, 21 Nov 2014 15:39:07 +0100 Subject: [PATCH 004/157] added a source/network add/remove to/from zone for firewalld - removed useless comment --- system/firewalld.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/system/firewalld.py b/system/firewalld.py index ace5e5fd1e4..cf90c5ace56 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -154,13 +154,11 @@ def add_source(zone, source): fw_zone = fw.config().getZoneByName(zone) fw_settings = fw_zone.getSettings() fw_settings.addSource(source) - fw_zone.update(fw_settings) def remove_source(zone, source): fw_zone = fw.config().getZoneByName(zone) fw_settings 
= fw_zone.getSettings() fw_settings.removeSource(source) - fw_zone.update(fw_settings) #################### # service handling @@ -354,7 +352,6 @@ def main(): msgs.append("Added %s to zone %s" % (source, zone)) elif desired_state == "disabled": if is_enabled == True: - msgs.append("source is present") if module.check_mode: module.exit_json(changed=True) From 192bf06af9709c7fdaefdbbd8c00329262747a7f Mon Sep 17 00:00:00 2001 From: Boris Ekelchik Date: Wed, 24 Dec 2014 11:52:52 -0800 Subject: [PATCH 005/157] New sts_assume_role module --- cloud/amazon/sts_assume_role.py | 166 ++++++++++++++++++++++++++++++++ 1 file changed, 166 insertions(+) create mode 100644 cloud/amazon/sts_assume_role.py diff --git a/cloud/amazon/sts_assume_role.py b/cloud/amazon/sts_assume_role.py new file mode 100644 index 00000000000..7e02dbbd84e --- /dev/null +++ b/cloud/amazon/sts_assume_role.py @@ -0,0 +1,166 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: sts_assume_role +short_description: assume a role in AWS account and obtain temporary credentials. +description: + - call AWS STS (Security Token Service) to assume a role in AWS account and obtain temporary credentials. This module has a dependency on python-boto. + For details on base AWS API reference http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html +version_added: "1.7" +options: + role_arn: + description: + - The Amazon Resource Name (ARN) of the role that the caller is assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs) + required: true + aliases: [] + role_session_name: + description: + - Name of the role's session - will be used by CloudTrail + required: true + aliases: [] + policy: + description: + - Supplemental policy to use in addition to assumed role's policies. + required: false + default: null + aliases: [] + duration_seconds: + description: + - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds. + required: false + default: null + aliases: [] + external_id: + description: + - A unique identifier that is used by third parties to assume a role in their customers' accounts. + required: false + default: null + aliases: [] + mfa_serial_number: + description: + - he identification number of the MFA device that is associated with the user who is making the AssumeRole call. + required: false + default: null + aliases: [] + mfa_token: + description: + - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA. 
+ required: false + default: null + aliases: [] + +author: Boris Ekelchik +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Basic example of assuming a role +tasks: +- name: assume a role in account 123456789012 + sts_assume_role: role_arn="arn:aws:iam::123456789012:role/someRole" session_name="someRoleSession" + +- name: display temporary credentials + debug: "temporary credentials for the assumed role are {{ ansible_temp_credentials }}" + +- name: use temporary credentials for tagging an instance in account 123456789012 + ec2_tag: resource=i-xyzxyz01 region=us-west-1 state=present + args: + aws_access_key: "{{ ansible_temp_credentials.access_key }}" + aws_secret_key: "{{ ansible_temp_credentials.secret_key }}" + security_token: "{{ ansible_temp_credentials.session_token }}" + + tags: + Test: value +''' + +import sys +import time + +try: + import boto.sts + +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + +def sts_connect(module): + + """ Return an STS connection""" + + region, ec2_url, boto_params = get_aws_connection_info(module) + + # If we have a region specified, connect to its endpoint. + if region: + try: + sts = connect_to_aws(boto.sts, region, **boto_params) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + # Otherwise, no region so we fallback to connect_sts method + else: + try: + sts = boto.connect_sts(**boto_params) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + + return sts + +def assumeRole(): + data = sts.assume_role() + return data + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + role_arn = dict(required=True), + role_session_name = dict(required=True), + duraction_seconds = dict(), + external_id = dict(), + policy = dict(), + mfa_serial_number = dict(), + mfa_token = dict(), + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + role_arn = module.params.get('role_arn') + role_session_name = module.params.get('role_session_name') + policy = module.params.get('policy') + duraction_seconds = module.params.get('duraction_seconds') + external_id = module.params.get('external_id') + mfa_serial_number = module.params.get('mfa_serial_number') + mfa_token = module.params.get('mfa_token') + + sts = sts_connect(module) + + temp_credentials = {} + + try: + temp_credentials = sts.assume_role(role_arn, role_session_name, policy, duraction_seconds, + external_id, mfa_serial_number, mfa_token).credentials.__dict__ + except boto.exception.BotoServerError, e: + module.fail_json(msg='Unable to assume role {0}, error: {1}'.format(role_arn, e)) + result = dict(changed=False, ansible_facts=dict(ansible_temp_credentials=temp_credentials)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() From a21e23846d20359d7d20236431e0bb662cd2a851 Mon Sep 17 00:00:00 2001 From: Phil Schwartz Date: Tue, 30 Dec 2014 08:42:00 -0600 Subject: [PATCH 006/157] init commit --- windows/win_unzip.ps1 | 83 +++++++++++++++++++++++++++++++++++++++ windows/win_unzip.py | 90 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 173 insertions(+) create mode 100644 windows/win_unzip.ps1 create mode 100644 windows/win_unzip.py diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1 new file mode 100644 index 00000000000..de9fb73e7ce --- /dev/null +++ b/windows/win_unzip.ps1 @@ -0,0 +1,83 @@ +#!powershell +# This file is part of Ansible +# +# 
Copyright 2014, Phil Schwartz +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +$result = New-Object psobject @{ + win_unzip = New-Object psobject + changed = $false +} + +If ($params.zip) { + $zip = $params.zip.toString() + + If (-Not (Test-Path -path $zip)){ + Fail-Json $result "zip file: $zip does not exist." + } +} +Else { + Fail-Json $result "missing required argument: zip" +} + +If (-Not($params.dest -eq $null)) { + $dest = $params.dest.toString() + + If (-Not (Test-Path $dest -PathType Container)){ + New-Item -itemtype directory -path $dest + } +} +Else { + Fail-Json $result "missing required argument: dest" +} + +Try { + cd C:\ + $shell = New-Object -ComObject Shell.Application + $shell.NameSpace($dest).copyhere(($shell.NameSpace($zip)).items(), 20) + $result.changed = $true +} +Catch { + $sp = $zip.split(".") + $ext = $sp[$sp.length-1] + + # Used to allow reboot after exe hotfix extraction (Windows 2008 R2 SP1) + # This will have no effect in most cases. + If (-Not ($ext -eq "exe")){ + $result.changed = $false + Fail-Json $result "Error unzipping $zip to $dest" + } +} + +If ($params.rm -eq "true"){ + Remove-Item $zip -Recurse -Force + Set-Attr $result.win_unzip "rm" "true" +} + +If ($params.restart -eq "true") { + Restart-Computer -Force + Set-Attr $result.win_unzip "restart" "true" +} + + +Set-Attr $result.win_unzip "zip" $zip.toString() +Set-Attr $result.win_unzip "dest" $dest.toString() + +Exit-Json $result; diff --git a/windows/win_unzip.py b/windows/win_unzip.py new file mode 100644 index 00000000000..ae2bfa94ad8 --- /dev/null +++ b/windows/win_unzip.py @@ -0,0 +1,90 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Phil Schwartz +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +DOCUMENTATION = ''' +--- +module: win_unzip +version_added: "" +short_description: Unzips compressed files on the Windows node +description: + - Unzips compressed files, and can force reboot (if needed, i.e. such as hotfixes). 
+options: + zip: + description: + - Zip file to be unzipped (provide absolute path) + required: true + default: null + aliases: [] + dest: + description: + - Destination of zip file (provide absolute path of directory) + required: true + default: null + aliases: [] + rm: + description: + - Remove the zip file, after unzipping + required: no + default: false + aliases: [] + restart: + description: + - Restarts the computer after unzip, can be useful for hotfixes such as http://support.microsoft.com/kb/2842230 (Restarts will have to be accounted for with wait_for module) + choices: + - true + - false + required: false + default: false + aliases: [] +author: Phil Schwartz +''' + +EXAMPLES = ''' +# This unzips hotfix http://support.microsoft.com/kb/2842230 and forces reboot (for hotfix to take effect) +$ ansible -i hosts -m win_unzip -a "zip=C:\\463984_intl_x64_zip.exe dest=C:\\Hotfix restart=true" all +# This unzips a library that was downloaded with win_get_url, and removes the file after extraction +$ ansible -i hosts -m win_unzip -a "zip=C:\\LibraryToUnzip.zip dest=C:\\Lib rm=true" all +# Playbook example +--- +- name: Install WinRM PowerShell Hotfix for Windows Server 2008 SP1 + hosts: all + gather_facts: false + tasks: + - name: Grab Hotfix from URL + win_get_url: + url: 'http://hotfixv4.microsoft.com/Windows%207/Windows%20Server2008%20R2%20SP1/sp2/Fix467402/7600/free/463984_intl_x64_zip.exe' + dest: 'C:\\463984_intl_x64_zip.exe' + - name: Unzip hotfix + win_unzip: + zip: "C:\\463984_intl_x64_zip.exe" + dest: "C:\\Hotfix" + restart: true + - name: Wait for server reboot... + local_action: + module: wait_for + host={{ inventory_hostname }} + port={{ansible_ssh_port|default(5986)}} + delay=15 + timeout=600 + state=started +''' From fd12a5cc8446eb302e643349f9c82bef275e95f6 Mon Sep 17 00:00:00 2001 From: Phil Schwartz Date: Tue, 30 Dec 2014 09:54:22 -0600 Subject: [PATCH 007/157] specifies creation of directory if !exists - added try catch for creation of directory, in case of an invalid path specified - added specification to documentation --- windows/win_unzip.ps1 | 7 ++++++- windows/win_unzip.py | 4 ++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1 index de9fb73e7ce..e76c51dc6aa 100644 --- a/windows/win_unzip.ps1 +++ b/windows/win_unzip.ps1 @@ -41,7 +41,12 @@ If (-Not($params.dest -eq $null)) { $dest = $params.dest.toString() If (-Not (Test-Path $dest -PathType Container)){ - New-Item -itemtype directory -path $dest + Try{ + New-Item -itemtype directory -path $dest + } + Catch { + Fail-Json $result "Error creating $dest directory" + } } } Else { diff --git a/windows/win_unzip.py b/windows/win_unzip.py index ae2bfa94ad8..f9ba5ded0d0 100644 --- a/windows/win_unzip.py +++ b/windows/win_unzip.py @@ -27,7 +27,7 @@ module: win_unzip version_added: "" short_description: Unzips compressed files on the Windows node description: - - Unzips compressed files, and can force reboot (if needed, i.e. such as hotfixes). + - Unzips compressed files, and can force reboot (if needed, i.e. such as hotfixes). If the destination directory does not exist, it will be created before unzipping the file. Specifying rm parameter will allow removal of the zip file after extraction. options: zip: description: @@ -37,7 +37,7 @@ options: aliases: [] dest: description: - - Destination of zip file (provide absolute path of directory) + - Destination of zip file (provide absolute path of directory). If it does not exist, the directory will be created. 
required: true default: null aliases: [] From 61d3f23c032457ff1a350b7859b5aca193ad4eb9 Mon Sep 17 00:00:00 2001 From: Phil Schwartz Date: Wed, 31 Dec 2014 10:34:32 -0600 Subject: [PATCH 008/157] edit check for extension to use library func --- windows/win_unzip.ps1 | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1 index e76c51dc6aa..6772792be08 100644 --- a/windows/win_unzip.ps1 +++ b/windows/win_unzip.ps1 @@ -41,7 +41,7 @@ If (-Not($params.dest -eq $null)) { $dest = $params.dest.toString() If (-Not (Test-Path $dest -PathType Container)){ - Try{ + Try{ New-Item -itemtype directory -path $dest } Catch { @@ -54,18 +54,14 @@ Else { } Try { - cd C:\ $shell = New-Object -ComObject Shell.Application $shell.NameSpace($dest).copyhere(($shell.NameSpace($zip)).items(), 20) $result.changed = $true } Catch { - $sp = $zip.split(".") - $ext = $sp[$sp.length-1] - # Used to allow reboot after exe hotfix extraction (Windows 2008 R2 SP1) # This will have no effect in most cases. - If (-Not ($ext -eq "exe")){ + If (-Not ([System.IO.Path]::GetExtension($zip) -match ".exe")){ $result.changed = $false Fail-Json $result "Error unzipping $zip to $dest" } From a95fabeeb2fff2d0e16b532d35da4dd60adb0b22 Mon Sep 17 00:00:00 2001 From: Phil Schwartz Date: Wed, 31 Dec 2014 19:17:53 -0600 Subject: [PATCH 009/157] fixes rm & restart param checks --- windows/win_unzip.ps1 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1 index 6772792be08..e77aa9e1df3 100644 --- a/windows/win_unzip.ps1 +++ b/windows/win_unzip.ps1 @@ -67,12 +67,12 @@ Catch { } } -If ($params.rm -eq "true"){ +If ($params.rm -eq "true" -Or $params.rm -eq "yes"){ Remove-Item $zip -Recurse -Force Set-Attr $result.win_unzip "rm" "true" } -If ($params.restart -eq "true") { +If ($params.restart -eq "true" -Or $params.restart -eq "yes") { Restart-Computer -Force Set-Attr $result.win_unzip "restart" "true" } From 99927a5c54aeca8ff18359f17d776a5f624d32a7 Mon Sep 17 00:00:00 2001 From: schwartzmx Date: Sun, 11 Jan 2015 13:03:26 -0600 Subject: [PATCH 010/157] =?UTF-8?q?updates=20docs,=20adds=20unzip=20func?= =?UTF-8?q?=20for=20bz2=20gz=20tar=20msu=C2=AC=20-=20Added=20functionality?= =?UTF-8?q?=20for=20unzipping/decompressing=20bzip=20gzip=20tar=20exe=20(s?= =?UTF-8?q?elf=20extracting)=20and=20msu=20(ms=20update)=20files=20to=20co?= =?UTF-8?q?incide=20with=20added=20functionality=20to=20win=5Fzip=C2=AC=20?= =?UTF-8?q?=20=20-=20Added=20functionality=20requires=20PSCX=20(it=20will?= =?UTF-8?q?=20be=20installed=20if=20it=20can't=20be=20imported)=C2=AC?= =?UTF-8?q?=C2=AC=20=20=20=20=20-=20First=20try=20with=20chocolatey,=20if?= =?UTF-8?q?=20fail,=20direct=20install=20from=20msi=20-=20Added=20recurse?= =?UTF-8?q?=20param=20to=20recursively=20unzip=20files=20from=20a=20compre?= =?UTF-8?q?ssed=20folder=C2=AC=20=20=20-=20useful=20for=20example:=20unzip?= =?UTF-8?q?ping=20a=20Log.zip=20file=20that=20contains=20a=20load=20of=20.?= =?UTF-8?q?gz=20files=C2=AC=20=20=20=20=20-=20setting=20rm=20param=20to=20?= =?UTF-8?q?true=20will=20remove=20all=20compressed=20files=20after=20decom?= =?UTF-8?q?pressing=C2=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- windows/win_unzip.ps1 | 147 ++++++++++++++++++++++++++++++++++++------ windows/win_unzip.py | 53 +++++++++++++-- 2 files changed, 174 insertions(+), 26 deletions(-) diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1 index 
e77aa9e1df3..f31a6273a39 100644 --- a/windows/win_unzip.ps1 +++ b/windows/win_unzip.ps1 @@ -26,15 +26,17 @@ $result = New-Object psobject @{ changed = $false } -If ($params.zip) { - $zip = $params.zip.toString() +If ($params.src) { + $src = $params.src.toString() - If (-Not (Test-Path -path $zip)){ - Fail-Json $result "zip file: $zip does not exist." + If (-Not (Test-Path -path $src)){ + Fail-Json $result "src file: $src does not exist." } + + $ext = [System.IO.Path]::GetExtension($dest) } Else { - Fail-Json $result "missing required argument: zip" + Fail-Json $result "missing required argument: src" } If (-Not($params.dest -eq $null)) { @@ -53,22 +55,120 @@ Else { Fail-Json $result "missing required argument: dest" } -Try { - $shell = New-Object -ComObject Shell.Application - $shell.NameSpace($dest).copyhere(($shell.NameSpace($zip)).items(), 20) - $result.changed = $true +If ($params.recurse -eq "true" -Or $params.recurse -eq "yes") { + $recurse = $true } -Catch { - # Used to allow reboot after exe hotfix extraction (Windows 2008 R2 SP1) - # This will have no effect in most cases. - If (-Not ([System.IO.Path]::GetExtension($zip) -match ".exe")){ - $result.changed = $false - Fail-Json $result "Error unzipping $zip to $dest" - } +Else { + $recurse = $false } If ($params.rm -eq "true" -Or $params.rm -eq "yes"){ - Remove-Item $zip -Recurse -Force + $rm = $true + Set-Attr $result.win_unzip "rm" "true" +} +Else { + $rm = $false +} + +If ($ext -eq ".zip" -And $recurse -eq $false) { + Try { + $shell = New-Object -ComObject Shell.Application + $shell.NameSpace($dest).copyhere(($shell.NameSpace($src)).items(), 20) + $result.changed = $true + } + Catch { + Fail-Json $result "Error unzipping $src to $dest" + } +} +# Need PSCX +Else { + # Requires PSCX, will be installed if it isn't found + # Pscx-3.2.0.msi + $url = "http://download-codeplex.sec.s-msft.com/Download/Release?ProjectName=pscx&DownloadId=923562&FileTime=130585918034470000&Build=20959" + $msi = "C:\Pscx-3.2.0.msi" + + # Check if PSCX is installed + $list = Get-Module -ListAvailable + # If not download it and install + If (-Not ($list -match "PSCX")) { + # Try install with chocolatey + Try { + cinst -force PSCX + $choco = $true + } + Catch { + $choco = $false + } + # install from downloaded msi if choco failed or is not present + If ($choco -eq $false) { + Try { + $client = New-Object System.Net.WebClient + $client.DownloadFile($url, $msi) + } + Catch { + Fail-Json $result "Error downloading PSCX from $url and saving as $dest" + } + Try { + msiexec.exe /i $msi /qb + # Give it a chance to install, so that it can be imported + sleep 10 + } + Catch { + Fail-Json $result "Error installing $msi" + } + } + Set-Attr $result.win_zip "pscx_status" "pscx was installed" + $installed = $true + } + Else { + Set-Attr $result.win_zip "pscx_status" "present" + } + + # Import + Try { + If ($installed) { + Import-Module 'C:\Program Files (x86)\Powershell Community Extensions\pscx3\pscx\pscx.psd1' + } + Else { + Import-Module PSCX + } + } + Catch { + Fail-Json $result "Error importing module PSCX" + } + + Try { + If ($recurse) { + Expand-Archive -Path $src -OutputPath $dest -Force + + If ($rm) { + Get-ChildItem $dest -recurse | Where {$_.extension -eq ".gz" -Or $_.extension -eq ".zip" -Or $_.extension -eq ".bz2" -Or $_.extension -eq ".tar" -Or $_.extension -eq ".msu"} | % { + Expand-Archive $_.FullName -OutputPath $dest -Force + Remove-Item $_.FullName -Force + } + } + Else { + Get-ChildItem $dest -recurse | Where {$_.extension -eq ".gz" -Or $_.extension -eq 
".zip" -Or $_.extension -eq ".bz2" -Or $_.extension -eq ".tar" -Or $_.extension -eq ".msu"} | % { + Expand-Archive $_.FullName -OutputPath $dest -Force + } + } + } + Else { + Expand-Archive -Path $src -OutputPath $dest -Force + } + } + Catch { + If ($recurse) { + Fail-Json "Error recursively expanding $src to $dest" + } + Else { + Fail-Json "Error expanding $src to $dest" + } + } +} + +If ($rm -eq $true){ + Remove-Item $src -Recurse -Force Set-Attr $result.win_unzip "rm" "true" } @@ -77,8 +177,17 @@ If ($params.restart -eq "true" -Or $params.restart -eq "yes") { Set-Attr $result.win_unzip "restart" "true" } - -Set-Attr $result.win_unzip "zip" $zip.toString() +# Fixes a fail error message (when the task actually succeeds) for a "Convert-ToJson: The converted JSON string is in bad format" +# This happens when JSON is parsing a string that ends with a "\", which is possible when specifying a directory to download to. +# This catches that possible error, before assigning the JSON $result +If ($src[$src.length-1] -eq "\") { + $src = $src.Substring(0, $src.length-1) +} +If ($dest[$dest.length-1] -eq "\") { + $dest = $dest.Substring(0, $dest.length-1) +} +Set-Attr $result.win_unzip "src" $src.toString() Set-Attr $result.win_unzip "dest" $dest.toString() +Set-Attr $result.win_unzip "recurse" $recurse.toString() Exit-Json $result; diff --git a/windows/win_unzip.py b/windows/win_unzip.py index f9ba5ded0d0..35093aa8c76 100644 --- a/windows/win_unzip.py +++ b/windows/win_unzip.py @@ -27,11 +27,11 @@ module: win_unzip version_added: "" short_description: Unzips compressed files on the Windows node description: - - Unzips compressed files, and can force reboot (if needed, i.e. such as hotfixes). If the destination directory does not exist, it will be created before unzipping the file. Specifying rm parameter will allow removal of the zip file after extraction. + - Unzips compressed files, and can force reboot (if needed, i.e. such as hotfixes). Has ability to recursively unzip files within the src zip file provided using Read-Archive and piping to Expand-Archive (Using PSCX). If the destination directory does not exist, it will be created before unzipping the file. If a .zip file is specified as src and recurse is true then PSCX will be installed. Specifying rm parameter will allow removal of the src file after extraction. options: - zip: + src: description: - - Zip file to be unzipped (provide absolute path) + - File to be unzipped (provide absolute path) required: true default: null aliases: [] @@ -45,14 +45,32 @@ options: description: - Remove the zip file, after unzipping required: no + choices: + - true + - false + - yes + - no default: false aliases: [] + recurse: + description: + - Recursively expand zipped files within the src file. 
+ required: no + default: false + choices: + - true + - false + - yes + - no + aliases: [] restart: description: - Restarts the computer after unzip, can be useful for hotfixes such as http://support.microsoft.com/kb/2842230 (Restarts will have to be accounted for with wait_for module) choices: - true - false + - yes + - no required: false default: false aliases: [] @@ -60,11 +78,31 @@ author: Phil Schwartz ''' EXAMPLES = ''' -# This unzips hotfix http://support.microsoft.com/kb/2842230 and forces reboot (for hotfix to take effect) -$ ansible -i hosts -m win_unzip -a "zip=C:\\463984_intl_x64_zip.exe dest=C:\\Hotfix restart=true" all # This unzips a library that was downloaded with win_get_url, and removes the file after extraction -$ ansible -i hosts -m win_unzip -a "zip=C:\\LibraryToUnzip.zip dest=C:\\Lib rm=true" all +$ ansible -i hosts -m win_unzip -a "src=C:\\LibraryToUnzip.zip dest=C:\\Lib rm=true" all # Playbook example + +# Simple unzip +--- +- name: Unzip a bz2 (BZip) file + win_unzip: + src: "C:\Users\Phil\Logs.bz2" + dest: "C:\Users\Phil\OldLogs" + +# This playbook example unzips a .zip file and recursively decompresses the contained .gz files and removes all unneeded compressed files after completion. +--- +- name: Unzip ApplicationLogs.zip and decompress all GZipped log files + hosts: all + gather_facts: false + tasks: + - name: Recursively decompress GZ files in ApplicationLogs.zip + win_unzip: + src: C:\Downloads\ApplicationLogs.zip + dest: C:\Application\Logs + recurse: yes + rm: true + +# Install hotfix (self-extracting .exe) --- - name: Install WinRM PowerShell Hotfix for Windows Server 2008 SP1 hosts: all @@ -76,8 +114,9 @@ $ ansible -i hosts -m win_unzip -a "zip=C:\\LibraryToUnzip.zip dest=C:\\Lib rm=t dest: 'C:\\463984_intl_x64_zip.exe' - name: Unzip hotfix win_unzip: - zip: "C:\\463984_intl_x64_zip.exe" + src: "C:\\463984_intl_x64_zip.exe" dest: "C:\\Hotfix" + recurse: true restart: true - name: Wait for server reboot... local_action: From 46f53724f0005411a6e2526aaef2ada3fc6d6af9 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Fri, 13 Feb 2015 13:37:16 -0500 Subject: [PATCH 011/157] Restore rax_mon_* modules. --- cloud/rackspace/rax_mon_alarm.py | 240 ++++++++++++++ cloud/rackspace/rax_mon_check.py | 323 +++++++++++++++++++ cloud/rackspace/rax_mon_entity.py | 196 +++++++++++ cloud/rackspace/rax_mon_notification.py | 187 +++++++++++ cloud/rackspace/rax_mon_notification_plan.py | 186 +++++++++++ 5 files changed, 1132 insertions(+) create mode 100644 cloud/rackspace/rax_mon_alarm.py create mode 100644 cloud/rackspace/rax_mon_check.py create mode 100644 cloud/rackspace/rax_mon_entity.py create mode 100644 cloud/rackspace/rax_mon_notification.py create mode 100644 cloud/rackspace/rax_mon_notification_plan.py diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py new file mode 100644 index 00000000000..f5fc9593abd --- /dev/null +++ b/cloud/rackspace/rax_mon_alarm.py @@ -0,0 +1,240 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_alarm +short_description: Create or delete a Rackspace Cloud Monitoring alarm. +description: +- Create or delete a Rackspace Cloud Monitoring alarm that associates an + existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with + criteria that specify what conditions will trigger which levels of + notifications. Rackspace monitoring module flow | rax_mon_entity -> + rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> + *rax_mon_alarm* +version_added: "1.8.2" +options: + state: + description: + - Ensure that the alarm with this C(label) exists or does not exist. + choices: [ "present", "absent" ] + required: false + default: present + label: + description: + - Friendly name for this alarm, used to achieve idempotence. Must be a String + between 1 and 255 characters long. + required: true + entity_id: + description: + - ID of the entity this alarm is attached to. May be acquired by registering + the value of a rax_mon_entity task. + required: true + check_id: + description: + - ID of the check that should be alerted on. May be acquired by registering + the value of a rax_mon_check task. + required: true + notification_plan_id: + description: + - ID of the notification plan to trigger if this alarm fires. May be acquired + by registering the value of a rax_mon_notification_plan task. + required: true + criteria: + description: + - Alarm DSL that describes alerting conditions and their output states. Must + be between 1 and 16384 characters long. See + http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html + for a reference on the alerting language. + disabled: + description: + - If yes, create this alarm, but leave it in an inactive state. Defaults to + no. + choices: [ "yes", "no" ] + metadata: + description: + - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String + keys and values between 1 and 255 characters long. +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Alarm example + gather_facts: False + hosts: local + connection: local + tasks: + - name: Ensure that a specific alarm exists. + rax_mon_alarm: + credentials: ~/.rax_pub + state: present + label: uhoh + entity_id: "{{ the_entity['entity']['id'] }}" + check_id: "{{ the_check['check']['id'] }}" + notification_plan_id: "{{ defcon1['notification_plan']['id'] }}" + criteria: > + if (rate(metric['average']) > 10) { + return new AlarmStatus(WARNING); + } + return new AlarmStatus(OK); + register: the_alarm +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria, + disabled, metadata): + + # Verify the presence of required attributes. 
+ + required_attrs = { + "label": label, "entity_id": entity_id, "check_id": check_id, + "notification_plan_id": notification_plan_id + } + + for (key, value) in required_attrs.iteritems(): + if not value: + module.fail_json(msg=('%s is required for rax_mon_alarm' % key)) + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + if criteria and len(criteria) < 1 or len(criteria) > 16384: + module.fail_json(msg='criteria must be between 1 and 16384 characters long') + + # Coerce attributes. + + changed = False + alarm = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [a for a in cm.list_alarms(entity_id) if a.label == label] + + if existing: + alarm = existing[0] + + if state == 'present': + should_create = False + should_update = False + should_delete = False + + if len(existing) > 1: + module.fail_json(msg='%s existing alarms have the label %s.' % + (len(existing), label)) + + if alarm: + if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id: + should_delete = should_create = True + + should_update = (disabled and disabled != alarm.disabled) or \ + (metadata and metadata != alarm.metadata) or \ + (criteria and criteria != alarm.criteria) + + if should_update and not should_delete: + cm.update_alarm(entity=entity_id, alarm=alarm, + criteria=criteria, disabled=disabled, + label=label, metadata=metadata) + changed = True + + if should_delete: + alarm.delete() + changed = True + else: + should_create = True + + if should_create: + alarm = cm.create_alarm(entity=entity_id, check=check_id, + notification_plan=notification_plan_id, + criteria=criteria, disabled=disabled, label=label, + metadata=metadata) + changed = True + elif state == 'absent': + for a in existing: + a.delete() + changed = True + else: + module.fail_json(msg='state must be either present or absent.') + + if alarm: + alarm_dict = { + "id": alarm.id, + "label": alarm.label, + "check_id": alarm.check_id, + "notification_plan_id": alarm.notification_plan_id, + "criteria": alarm.criteria, + "disabled": alarm.disabled, + "metadata": alarm.metadata + } + module.exit_json(changed=changed, alarm=alarm_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present'), + label=dict(), + entity_id=dict(), + check_id=dict(), + notification_plan_id=dict(), + criteria=dict(), + disabled=dict(type='bool', default=False), + metadata=dict(type='dict') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + label = module.params.get('label') + entity_id = module.params.get('entity_id') + check_id = module.params.get('check_id') + notification_plan_id = module.params.get('notification_plan_id') + criteria = module.params.get('criteria') + disabled = module.boolean(module.params.get('disabled')) + metadata = module.params.get('metadata') + + setup_rax_module(module, pyrax) + + alarm(module, state, label, entity_id, check_id, notification_plan_id, + criteria, disabled, metadata) + + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. 
+main() diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py new file mode 100644 index 00000000000..9da283c3ba0 --- /dev/null +++ b/cloud/rackspace/rax_mon_check.py @@ -0,0 +1,323 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_check +short_description: Create or delete a Rackspace Cloud Monitoring check for an + existing entity. +description: +- Create or delete a Rackspace Cloud Monitoring check associated with an + existing rax_mon_entity. A check is a specific test or measurement that is + performed, possibly from different monitoring zones, on the systems you + monitor. Rackspace monitoring module flow | rax_mon_entity -> + *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> + rax_mon_alarm +version_added: "1.8.2" +options: + state: + description: + - Ensure that a check with this C(label) exists or does not exist. + choices: ["present", "absent"] + entity_id: + description: + - ID of the rax_mon_entity to target with this check. + required: true + label: + description: + - Defines a label for this check, between 1 and 64 characters long. + required: true + check_type: + description: + - The type of check to create. C(remote.) checks may be created on any + rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities + that have a non-null C(agent_id). + choices: + - remote.dns + - remote.ftp-banner + - remote.http + - remote.imap-banner + - remote.mssql-banner + - remote.mysql-banner + - remote.ping + - remote.pop3-banner + - remote.postgresql-banner + - remote.smtp-banner + - remote.smtp + - remote.ssh + - remote.tcp + - remote.telnet-banner + - agent.filesystem + - agent.memory + - agent.load_average + - agent.cpu + - agent.disk + - agent.network + - agent.plugin + required: true + monitoring_zones_poll: + description: + - Comma-separated list of the names of the monitoring zones the check should + run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon, + mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks. + target_hostname: + description: + - One of `target_hostname` and `target_alias` is required for remote.* checks, + but prohibited for agent.* checks. The hostname this check should target. + Must be a valid IPv4, IPv6, or FQDN. + target_alias: + description: + - One of `target_alias` and `target_hostname` is required for remote.* checks, + but prohibited for agent.* checks. Use the corresponding key in the entity's + `ip_addresses` hash to resolve an IP address to target. + details: + description: + - Additional details specific to the check type. Must be a hash of strings + between 1 and 255 characters long, or an array or object containing 0 to + 256 items. 
+ disabled: + description: + - If "yes", ensure the check is created, but don't actually use it yet. + choices: [ "yes", "no" ] + metadata: + description: + - Hash of arbitrary key-value pairs to accompany this check if it fires. + Keys and values must be strings between 1 and 255 characters long. + period: + description: + - The number of seconds between each time the check is performed. Must be + greater than the minimum period set on your account. + timeout: + description: + - The number of seconds this check will wait when attempting to collect + results. Must be less than the period. +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Create a monitoring check + gather_facts: False + hosts: local + connection: local + tasks: + - name: Associate a check with an existing entity. + rax_mon_check: + credentials: ~/.rax_pub + state: present + entity_id: "{{ the_entity['entity']['id'] }}" + label: the_check + check_type: remote.ping + monitoring_zones_poll: mziad,mzord,mzdfw + details: + count: 10 + meta: + hurf: durf + register: the_check +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def cloud_check(module, state, entity_id, label, check_type, + monitoring_zones_poll, target_hostname, target_alias, details, + disabled, metadata, period, timeout): + + # Verify the presence of required attributes. + + required_attrs = { + "entity_id": entity_id, "label": label, "check_type": check_type + } + + for (key, value) in required_attrs.iteritems(): + if not value: + module.fail_json(msg=('%s is required for rax_mon_check' % key)) + + # Coerce attributes. + + if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list): + monitoring_zones_poll = [monitoring_zones_poll] + + if period: + period = int(period) + + if timeout: + timeout = int(timeout) + + changed = False + check = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + entity = cm.get_entity(entity_id) + if not entity: + module.fail_json(msg='Failed to instantiate entity. "%s" may not be' + ' a valid entity id.' % entity_id) + + existing = [e for e in entity.list_checks() if e.label == label] + + if existing: + check = existing[0] + + if state == 'present': + if len(existing) > 1: + module.fail_json(msg='%s existing checks have a label of %s.' % + (len(existing), label)) + + should_delete = False + should_create = False + should_update = False + + if check: + # Details may include keys set to default values that are not + # included in the initial creation. + # + # Only force a recreation of the check if one of the *specified* + # keys is missing or has a different value. 
+ if details: + for (key, value) in details.iteritems(): + if key not in check.details: + should_delete = should_create = True + elif value != check.details[key]: + should_delete = should_create = True + + should_update = label != check.label or \ + (target_hostname and target_hostname != check.target_hostname) or \ + (target_alias and target_alias != check.target_alias) or \ + (disabled != check.disabled) or \ + (metadata and metadata != check.metadata) or \ + (period and period != check.period) or \ + (timeout and timeout != check.timeout) or \ + (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll) + + if should_update and not should_delete: + check.update(label=label, + disabled=disabled, + metadata=metadata, + monitoring_zones_poll=monitoring_zones_poll, + timeout=timeout, + period=period, + target_alias=target_alias, + target_hostname=target_hostname) + changed = True + else: + # The check doesn't exist yet. + should_create = True + + if should_delete: + check.delete() + + if should_create: + check = cm.create_check(entity, + label=label, + check_type=check_type, + target_hostname=target_hostname, + target_alias=target_alias, + monitoring_zones_poll=monitoring_zones_poll, + details=details, + disabled=disabled, + metadata=metadata, + period=period, + timeout=timeout) + changed = True + elif state == 'absent': + if check: + check.delete() + changed = True + else: + module.fail_json(msg='state must be either present or absent.') + + if check: + check_dict = { + "id": check.id, + "label": check.label, + "type": check.type, + "target_hostname": check.target_hostname, + "target_alias": check.target_alias, + "monitoring_zones_poll": check.monitoring_zones_poll, + "details": check.details, + "disabled": check.disabled, + "metadata": check.metadata, + "period": check.period, + "timeout": check.timeout + } + module.exit_json(changed=changed, check=check_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + entity_id=dict(), + label=dict(), + check_type=dict(), + monitoring_zones_poll=dict(), + target_hostname=dict(), + target_alias=dict(), + details=dict(type='dict', default={}), + disabled=dict(type='bool', default=False), + metadata=dict(type='dict', default={}), + period=dict(type='int'), + timeout=dict(type='int'), + state=dict(default='present') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + entity_id = module.params.get('entity_id') + label = module.params.get('label') + check_type = module.params.get('check_type') + monitoring_zones_poll = module.params.get('monitoring_zones_poll') + target_hostname = module.params.get('target_hostname') + target_alias = module.params.get('target_alias') + details = module.params.get('details') + disabled = module.boolean(module.params.get('disabled')) + metadata = module.params.get('metadata') + period = module.params.get('period') + timeout = module.params.get('timeout') + + state = module.params.get('state') + + setup_rax_module(module, pyrax) + + cloud_check(module, state, entity_id, label, check_type, + monitoring_zones_poll, target_hostname, target_alias, details, + disabled, metadata, period, timeout) + + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. 
+main() diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py new file mode 100644 index 00000000000..8b95c291914 --- /dev/null +++ b/cloud/rackspace/rax_mon_entity.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_entity +short_description: Create or delete a Rackspace Cloud Monitoring entity +description: +- Create or delete a Rackspace Cloud Monitoring entity, which represents a device + to monitor. Entities associate checks and alarms with a target system and + provide a convenient, centralized place to store IP addresses. Rackspace + monitoring module flow | *rax_mon_entity* -> rax_mon_check -> + rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm +version_added: "1.8.2" +options: + label: + description: + - Defines a name for this entity. Must be a non-empty string between 1 and + 255 characters long. + required: true + state: + description: + - Ensure that an entity with this C(name) exists or does not exist. + choices: ["present", "absent"] + agent_id: + description: + - Rackspace monitoring agent on the target device to which this entity is + bound. Necessary to collect C(agent.) rax_mon_checks against this entity. + named_ip_addresses: + description: + - Hash of IP addresses that may be referenced by name by rax_mon_checks + added to this entity. Must be a dictionary of with keys that are names + between 1 and 64 characters long, and values that are valid IPv4 or IPv6 + addresses. + metadata: + description: + - Hash of arbitrary C(name), C(value) pairs that are passed to associated + rax_mon_alarms. Names and values must all be between 1 and 255 characters + long. +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Entity example + gather_facts: False + hosts: local + connection: local + tasks: + - name: Ensure an entity exists + rax_mon_entity: + credentials: ~/.rax_pub + state: present + label: my_entity + named_ip_addresses: + web_box: 192.168.0.10 + db_box: 192.168.0.11 + meta: + hurf: durf + register: the_entity +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, + metadata): + if not label: + module.fail_json(msg='label is required for rax_mon_entity') + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + changed = False + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. 
This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for entity in cm.list_entities(): + if label == entity.label: + existing.append(entity) + + entity = None + + if existing: + entity = existing[0] + + if state == 'present': + should_update = False + should_delete = False + should_create = False + + if len(existing) > 1: + module.fail_json(msg='%s existing entities have the label %s.' % + (len(existing), label)) + + if entity: + if named_ip_addresses and named_ip_addresses != entity.ip_addresses: + should_delete = should_create = True + + # Change an existing Entity, unless there's nothing to do. + should_update = agent_id and agent_id != entity.agent_id or \ + (metadata and metadata != entity.metadata) + + if should_update and not should_delete: + entity.update(agent_id, metadata) + changed = True + + if should_delete: + entity.delete() + else: + should_create = True + + if should_create: + # Create a new Entity. + entity = cm.create_entity(label=label, agent=agent_id, + ip_addresses=named_ip_addresses, + metadata=metadata) + changed = True + elif state == 'absent': + # Delete the existing Entities. + for e in existing: + e.delete() + changed = True + else: + module.fail_json(msg='state must be present or absent') + + if entity: + entity_dict = { + "id": entity.id, + "name": entity.name, + "agent_id": entity.agent_id, + } + module.exit_json(changed=changed, entity=entity_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present'), + label=dict(), + agent_id=dict(), + named_ip_addresses=dict(type='dict', default={}), + metadata=dict(type='dict', default={}) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + agent_id = module.params.get('agent_id') + named_ip_addresses = module.params.get('named_ip_addresses') + metadata = module.params.get('metadata') + + setup_rax_module(module, pyrax) + + cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata) + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. +main() diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py new file mode 100644 index 00000000000..74c4319255b --- /dev/null +++ b/cloud/rackspace/rax_mon_notification.py @@ -0,0 +1,187 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_notification +short_description: Create or delete a Rackspace Cloud Monitoring notification. +description: +- Create or delete a Rackspace Cloud Monitoring notification that specifies a + channel that can be used to communicate alarms, such as email, webhooks, or + PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> + *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm +version_added: "1.8.2" +options: + state: + description: + - Ensure that the notification with this C(label) exists or does not exist. + choices: ['present', 'absent'] + label: + description: + - Defines a friendly name for this notification. String between 1 and 255 + characters long. + required: true + notification_type: + description: + - A supported notification type. + choices: ["webhook", "email", "pagerduty"] + required: true + details: + description: + - Dictionary of key-value pairs used to initialize the notification. + Required keys and meanings vary with notification type. See + http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/ + service-notification-types-crud.html for details. + required: true +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Monitoring notification example + gather_facts: False + hosts: local + connection: local + tasks: + - name: Email me when something goes wrong. + rax_mon_entity: + credentials: ~/.rax_pub + label: omg + type: email + details: + address: me@mailhost.com + register: the_notification +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def notification(module, state, label, notification_type, details): + + if not label: + module.fail_json(msg='label is required for rax_mon_notification') + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + if not notification_type: + module.fail_json(msg='you must provide a notification_type') + + if not details: + module.fail_json(msg='notification details are required') + + changed = False + notification = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for n in cm.list_notifications(): + if n.label == label: + existing.append(n) + + if existing: + notification = existing[0] + + if state == 'present': + should_update = False + should_delete = False + should_create = False + + if len(existing) > 1: + module.fail_json(msg='%s existing notifications are labelled %s.' 
% + (len(existing), label)) + + if notification: + should_delete = (notification_type != notification.type) + + should_update = (details != notification.details) + + if should_update and not should_delete: + notification.update(details=notification.details) + changed = True + + if should_delete: + notification.delete() + else: + should_create = True + + if should_create: + notification = cm.create_notification(notification_type, + label=label, details=details) + changed = True + elif state == 'absent': + for n in existing: + n.delete() + changed = True + else: + module.fail_json(msg='state must be either "present" or "absent"') + + if notification: + notification_dict = { + "id": notification.id, + "type": notification.type, + "label": notification.label, + "details": notification.details + } + module.exit_json(changed=changed, notification=notification_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present'), + label=dict(), + notification_type=dict(), + details=dict(type='dict', default={}) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + notification_type = module.params.get('notification_type') + details = module.params.get('details') + + setup_rax_module(module, pyrax) + + notification(module, state, label, notification_type, details) + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. +main() diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py new file mode 100644 index 00000000000..c8d4d215292 --- /dev/null +++ b/cloud/rackspace/rax_mon_notification_plan.py @@ -0,0 +1,186 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_notification_plan +short_description: Create or delete a Rackspace Cloud Monitoring notification + plan. +description: +- Create or delete a Rackspace Cloud Monitoring notification plan by + associating existing rax_mon_notifications with severity levels. Rackspace + monitoring module flow | rax_mon_entity -> rax_mon_check -> + rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm +version_added: "1.8.2" +options: + state: + description: + - Ensure that the notification plan with this C(label) exists or does not + exist. + choices: ['present', 'absent'] + label: + description: + - Defines a friendly name for this notification plan. String between 1 and + 255 characters long. 
+ required: true + critical_state: + description: + - Notification list to use when the alarm state is CRITICAL. Must be an + array of valid rax_mon_notification ids. + warning_state: + description: + - Notification list to use when the alarm state is WARNING. Must be an array + of valid rax_mon_notification ids. + ok_state: + description: + - Notification list to use when the alarm state is OK. Must be an array of + valid rax_mon_notification ids. +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Example notification plan + gather_facts: False + hosts: local + connection: local + tasks: + - name: Establish who gets called when. + rax_mon_notification_plan: + credentials: ~/.rax_pub + state: present + label: defcon1 + critical_state: + - "{{ everyone['notification']['id'] }}" + warning_state: + - "{{ opsfloor['notification']['id'] }}" + register: defcon1 +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def notification_plan(module, state, label, critical_state, warning_state, ok_state): + + if not label: + module.fail_json(msg='label is required for rax_mon_notification_plan') + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + changed = False + notification_plan = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for n in cm.list_notification_plans(): + if n.label == label: + existing.append(n) + + if existing: + notification_plan = existing[0] + + if state == 'present': + should_create = False + should_delete = False + + if len(existing) > 1: + module.fail_json(msg='%s notification plans are labelled %s.' 
% + (len(existing), label)) + + if notification_plan: + should_delete = (critical_state and critical_state != notification_plan.critical_state) or \ + (warning_state and warning_state != notification_plan.warning_state) or \ + (ok_state and ok_state != notification_plan.ok_state) + + if should_delete: + notification_plan.delete() + should_create = True + else: + should_create = True + + if should_create: + notification_plan = cm.create_notification_plan(label=label, + critical_state=critical_state, + warning_state=warning_state, + ok_state=ok_state) + changed = True + elif state == 'absent': + for np in existing: + np.delete() + changed = True + else: + module.fail_json(msg='state must be either "present" or "absent"') + + if notification_plan: + notification_plan_dict = { + "id": notification_plan.id, + "critical_state": notification_plan.critical_state, + "warning_state": notification_plan.warning_state, + "ok_state": notification_plan.ok_state, + "metadata": notification_plan.metadata + } + module.exit_json(changed=changed, notification_plan=notification_plan_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present'), + label=dict(), + critical_state=dict(type='list'), + warning_state=dict(type='list'), + ok_state=dict(type='list') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + critical_state = module.params.get('critical_state') + warning_state = module.params.get('warning_state') + ok_state = module.params.get('ok_state') + + setup_rax_module(module, pyrax) + + notification_plan(module, state, label, critical_state, warning_state, ok_state) + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. +main() From 817f603b6be58d2f44a5c0713d03a5377181915e Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Tue, 17 Feb 2015 13:33:43 -0500 Subject: [PATCH 012/157] Initial implementation of rax_clb_ssl. --- cloud/rackspace/rax_clb_ssl.py | 284 +++++++++++++++++++++++++++++++++ 1 file changed, 284 insertions(+) create mode 100644 cloud/rackspace/rax_clb_ssl.py diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py new file mode 100644 index 00000000000..d93e2f594e7 --- /dev/null +++ b/cloud/rackspace/rax_clb_ssl.py @@ -0,0 +1,284 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION=''' +module: rax_clb_ssl +short_description: Manage SSL termination for a Rackspace Cloud Load Balancer. 
+description: +- Set up, reconfigure, or remove SSL termination for an existing load balancer. +version_added: "1.8.2" +options: + balancer_name: + description: + - Name or ID of the load balancer on which to manage SSL termination. + required: true + state: + description: + - If set to "present", SSL termination will be added to this load balancer. + - If "absent", SSL termination will be removed instead. + choices: + - present + - absent + default: present + enabled: + description: + - If set to "false", temporarily disable SSL termination without discarding + - existing credentials. + default: true + private_key: + description: + - The private SSL key as a string in PEM format. + certificate: + description: + - The public SSL certificates as a string in PEM format. + intermediate_certificate: + description: + - One or more intermediate certificate authorities as a string in PEM + - format, concatenated into a single string. + secure_port: + description: + - The port to listen for secure traffic. + default: 443 + secure_traffic_only: + description: + - If "true", the load balancer will *only* accept secure traffic. + default: false + https_redirect: + description: + - If "true", the load balancer will redirect HTTP traffic to HTTPS. + - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL + - termination is also applied or removed. + wait: + description: + - Wait for the balancer to be in state "running" before turning. + default: false + wait_timeout: + description: + - How long before "wait" gives up, in seconds. + default: 300 +author: Ash Wilson +extends_documentation_fragment: rackspace +''' + +EXAMPLES = ''' +- name: Enable SSL termination on a load balancer + rax_clb_ssl: + balancer_name: the_loadbalancer + state: present + private_key: "{{ lookup('file', 'credentials/server.key' ) }}" + certificate: "{{ lookup('file', 'credentials/server.crt' ) }}" + intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}" + secure_traffic_only: true + wait: true + +- name: Disable SSL termination + rax_clb_ssl: + balancer_name: "{{ registered_lb.balancer.id }}" + state: absent + wait: true +''' + +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def cloud_load_balancer_ssl(module, balancer_name, state, enabled, private_key, + certificate, intermediate_certificate, secure_port, + secure_traffic_only, https_redirect, + wait, wait_timeout): + # Validate arguments. + + if not balancer_name: + module.fail_json(msg='balancer_name is required.') + + if state == 'present': + if not private_key: + module.fail_json(msg="private_key must be provided.") + else: + private_key = private_key.strip() + + if not certificate: + module.fail_json(msg="certificate must be provided.") + else: + certificate = certificate.strip() + + if state not in ('present', 'absent'): + module.fail_json(msg="State must be either 'present' or 'absent'.") + + attempts = wait_timeout / 5 + + # Locate the load balancer. + + clb = pyrax.cloud_loadbalancers + if not clb: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + balancers = [] + for balancer in clb.list(): + if balancer_name == balancer.name or balancer_name == str(balancer.id): + balancers.append(balancer) + + if not balancers: + module.fail_json(msg='No load balancers matched your criteria. 
' + 'Use rax_clb to create the balancer first.') + + if len(balancers) > 1: + module.fail_json(msg="%d load balancers were matched your criteria. Try" + "using the balancer's id instead." % len(balancers)) + + balancer = balancers[0] + existing_ssl = balancer.get_ssl_termination() + + changed = False + + if state == 'present': + # Apply or reconfigure SSL termination on the load balancer. + ssl_attrs = dict( + securePort=secure_port, + privatekey=private_key, + certificate=certificate, + intermediateCertificate=intermediate_certificate, + enabled=enabled, + secureTrafficOnly=secure_traffic_only + ) + + needs_change = False + + if existing_ssl: + for ssl_attr, value in ssl_attrs.iteritems(): + if ssl_attr == 'privatekey': + # The private key is not included in get_ssl_termination's + # output (as it shouldn't be). Also, if you're changing the + # private key, you'll also be changing the certificate, + # so we don't lose anything by not checking it. + continue + + if value is not None and existing_ssl.get(ssl_attr) != value: + # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr)) + needs_change = True + else: + needs_change = True + + if needs_change: + balancer.add_ssl_termination(**ssl_attrs) + changed = True + elif state == 'absent': + # Remove SSL termination if it's already configured. + if existing_ssl: + balancer.delete_ssl_termination() + changed = True + + if https_redirect is not None and balancer.httpsRedirect != https_redirect: + if changed: + # This wait is unavoidable because load balancers are immutable + # while the SSL termination changes above are being applied. + pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) + + balancer.update(httpsRedirect=https_redirect) + changed = True + + if changed and wait: + pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) + + balancer.get() + new_ssl_termination = balancer.get_ssl_termination() + + # Intentionally omit the private key from the module output, so you don't + # accidentally echo it with `ansible-playbook -v` or `debug`, and the + # certificate, which is just long. Convert other attributes to snake_case + # and include https_redirect at the top-level. 
+ if new_ssl_termination: + new_ssl = dict( + enabled=new_ssl_termination['enabled'], + secure_port=new_ssl_termination['securePort'], + secure_traffic_only=new_ssl_termination['secureTrafficOnly'] + ) + else: + new_ssl = None + + result = dict( + changed=changed, + https_redirect=balancer.httpsRedirect, + ssl_termination=new_ssl + ) + success = True + + if balancer.status == 'ERROR': + result['msg'] = '%s failed to build' % balancer.id + success = False + elif wait and balancer.status not in ('ACTIVE', 'ERROR'): + result['msg'] = 'Timeout waiting on %s' % balancer.id + success = False + + if success: + module.exit_json(**result) + else: + module.fail_json(**result) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update(dict( + balancer_name=dict(type='str'), + state=dict(default='present', choices=['present', 'absent']), + enabled=dict(type='bool', default=True), + private_key=dict(), + certificate=dict(), + intermediate_certificate=dict(), + secure_port=dict(type='int', default=443), + secure_traffic_only=dict(type='bool', default=False), + https_redirect=dict(type='bool'), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module.') + + balancer_name = module.params.get('balancer_name') + state = module.params.get('state') + enabled = module.boolean(module.params.get('enabled')) + private_key = module.params.get('private_key') + certificate = module.params.get('certificate') + intermediate_certificate = module.params.get('intermediate_certificate') + secure_port = module.params.get('secure_port') + secure_traffic_only = module.boolean(module.params.get('secure_traffic_only')) + https_redirect = module.boolean(module.params.get('https_redirect')) + wait = module.boolean(module.params.get('wait')) + wait_timeout = module.params.get('wait_timeout') + + setup_rax_module(module, pyrax) + + cloud_load_balancer_ssl( + module, balancer_name, state, enabled, private_key, certificate, + intermediate_certificate, secure_port, secure_traffic_only, + https_redirect, wait, wait_timeout + ) + +main() From 4c4c0bb11909b4219f8bbe3abc1fce5cfff50e20 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 19 Feb 2015 13:36:33 -0500 Subject: [PATCH 013/157] Use the correct version_added. --- cloud/rackspace/rax_clb_ssl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py index d93e2f594e7..7a27f93116f 100644 --- a/cloud/rackspace/rax_clb_ssl.py +++ b/cloud/rackspace/rax_clb_ssl.py @@ -21,7 +21,7 @@ module: rax_clb_ssl short_description: Manage SSL termination for a Rackspace Cloud Load Balancer. description: - Set up, reconfigure, or remove SSL termination for an existing load balancer. -version_added: "1.8.2" +version_added: "1.9" options: balancer_name: description: From 015ffbf9a90c776f8b222e61510b5b45c4fa6e9b Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 19 Feb 2015 13:37:28 -0500 Subject: [PATCH 014/157] Move ansible imports to the module's bottom. 
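For context on this change: the modules in this series consistently define the module body first, pull in the module-snippet wildcard imports near the end of the file, and invoke main() last (see the rax_mon_* modules above), and this commit brings rax_clb_ssl.py in line with that layout. A minimal, illustrative skeleton of the layout is sketched below; the module logic and parameter names are placeholders, not part of this patch.

    #!/usr/bin/python
    # Illustrative layout sketch only, not part of the patch: module logic
    # first, snippet wildcard imports at the bottom, main() invoked last.

    def main():
        module = AnsibleModule(
            argument_spec=dict(
                name=dict(required=True),
            ),
            supports_check_mode=True,
        )
        # A real module would do its work here.
        module.exit_json(changed=False, name=module.params['name'])

    # Import module snippets.
    from ansible.module_utils.basic import *

    main()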
--- cloud/rackspace/rax_clb_ssl.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py index 7a27f93116f..d011432e066 100644 --- a/cloud/rackspace/rax_clb_ssl.py +++ b/cloud/rackspace/rax_clb_ssl.py @@ -93,9 +93,6 @@ EXAMPLES = ''' wait: true ''' -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - try: import pyrax HAS_PYRAX = True @@ -281,4 +278,7 @@ def main(): https_redirect, wait, wait_timeout ) +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + main() From 0380490ae9561d414684602f8a0a5323b98949d8 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 19 Feb 2015 13:41:03 -0500 Subject: [PATCH 015/157] Rename "balancer_name" to "loadbalancer." --- cloud/rackspace/rax_clb_ssl.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py index d011432e066..2195d08b938 100644 --- a/cloud/rackspace/rax_clb_ssl.py +++ b/cloud/rackspace/rax_clb_ssl.py @@ -23,7 +23,7 @@ description: - Set up, reconfigure, or remove SSL termination for an existing load balancer. version_added: "1.9" options: - balancer_name: + loadbalancer: description: - Name or ID of the load balancer on which to manage SSL termination. required: true @@ -99,15 +99,12 @@ try: except ImportError: HAS_PYRAX = False -def cloud_load_balancer_ssl(module, balancer_name, state, enabled, private_key, +def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, certificate, intermediate_certificate, secure_port, secure_traffic_only, https_redirect, wait, wait_timeout): # Validate arguments. - if not balancer_name: - module.fail_json(msg='balancer_name is required.') - if state == 'present': if not private_key: module.fail_json(msg="private_key must be provided.") @@ -134,7 +131,7 @@ def cloud_load_balancer_ssl(module, balancer_name, state, enabled, private_key, balancers = [] for balancer in clb.list(): - if balancer_name == balancer.name or balancer_name == str(balancer.id): + if loadbalancer == balancer.name or loadbalancer == str(balancer.id): balancers.append(balancer) if not balancers: @@ -237,7 +234,7 @@ def cloud_load_balancer_ssl(module, balancer_name, state, enabled, private_key, def main(): argument_spec = rax_argument_spec() argument_spec.update(dict( - balancer_name=dict(type='str'), + loadbalancer=dict(required=True), state=dict(default='present', choices=['present', 'absent']), enabled=dict(type='bool', default=True), private_key=dict(), @@ -258,7 +255,7 @@ def main(): if not HAS_PYRAX: module.fail_json(msg='pyrax is required for this module.') - balancer_name = module.params.get('balancer_name') + loadbalancer = module.params.get('loadbalancer') state = module.params.get('state') enabled = module.boolean(module.params.get('enabled')) private_key = module.params.get('private_key') @@ -273,7 +270,7 @@ def main(): setup_rax_module(module, pyrax) cloud_load_balancer_ssl( - module, balancer_name, state, enabled, private_key, certificate, + module, loadbalancer, state, enabled, private_key, certificate, intermediate_certificate, secure_port, secure_traffic_only, https_redirect, wait, wait_timeout ) From 1a8ed52819f03a3a3ebde3e1f81e25f35e231fa3 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 19 Feb 2015 13:41:29 -0500 Subject: [PATCH 016/157] Remove redundant "state" validity check. 
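The check removed in this commit is redundant because the argument spec already declares state with choices=['present', 'absent'], and AnsibleModule validates choices during argument parsing, before the module body ever runs. A small sketch of the declarative form, with the parameter set trimmed for illustration:

    def main():
        module = AnsibleModule(
            argument_spec=dict(
                # Values outside `choices` are rejected by AnsibleModule
                # itself, so no in-body validation of `state` is needed.
                state=dict(default='present', choices=['present', 'absent']),
            ),
        )
        module.exit_json(changed=False, state=module.params['state'])

    from ansible.module_utils.basic import *

    main()

A task that passes an unsupported state value fails during argument parsing rather than inside the module logic, which is what makes the hand-written check dead code.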
--- cloud/rackspace/rax_clb_ssl.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py index 2195d08b938..cff30d67b5e 100644 --- a/cloud/rackspace/rax_clb_ssl.py +++ b/cloud/rackspace/rax_clb_ssl.py @@ -116,9 +116,6 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, else: certificate = certificate.strip() - if state not in ('present', 'absent'): - module.fail_json(msg="State must be either 'present' or 'absent'.") - attempts = wait_timeout / 5 # Locate the load balancer. From e1cdda56ff697a891541efd04acf39a9f4dcac64 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 19 Feb 2015 13:48:03 -0500 Subject: [PATCH 017/157] Use rax_find_loadbalancer utility method. --- cloud/rackspace/rax_clb_ssl.py | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py index cff30d67b5e..f16118c20f4 100644 --- a/cloud/rackspace/rax_clb_ssl.py +++ b/cloud/rackspace/rax_clb_ssl.py @@ -120,26 +120,7 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, # Locate the load balancer. - clb = pyrax.cloud_loadbalancers - if not clb: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - balancers = [] - for balancer in clb.list(): - if loadbalancer == balancer.name or loadbalancer == str(balancer.id): - balancers.append(balancer) - - if not balancers: - module.fail_json(msg='No load balancers matched your criteria. ' - 'Use rax_clb to create the balancer first.') - - if len(balancers) > 1: - module.fail_json(msg="%d load balancers were matched your criteria. Try" - "using the balancer's id instead." % len(balancers)) - - balancer = balancers[0] + balancer = rax_find_loadbalancer(module, pyrax, loadbalancer) existing_ssl = balancer.get_ssl_termination() changed = False From 65a1129ef9800c2f094f07b84677d33b762337cb Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 23 Feb 2015 14:18:35 -0600 Subject: [PATCH 018/157] Correct version_added in the documentation. --- cloud/rackspace/rax_mon_alarm.py | 2 +- cloud/rackspace/rax_mon_check.py | 2 +- cloud/rackspace/rax_mon_entity.py | 2 +- cloud/rackspace/rax_mon_notification.py | 2 +- cloud/rackspace/rax_mon_notification_plan.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py index f5fc9593abd..aa742d02bd8 100644 --- a/cloud/rackspace/rax_mon_alarm.py +++ b/cloud/rackspace/rax_mon_alarm.py @@ -27,7 +27,7 @@ description: notifications. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> *rax_mon_alarm* -version_added: "1.8.2" +version_added: "1.9" options: state: description: diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py index 9da283c3ba0..3f86da93ab6 100644 --- a/cloud/rackspace/rax_mon_check.py +++ b/cloud/rackspace/rax_mon_check.py @@ -28,7 +28,7 @@ description: monitor. 
Rackspace monitoring module flow | rax_mon_entity -> *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm -version_added: "1.8.2" +version_added: "1.9" options: state: description: diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py index 8b95c291914..9d20252b0e5 100644 --- a/cloud/rackspace/rax_mon_entity.py +++ b/cloud/rackspace/rax_mon_entity.py @@ -26,7 +26,7 @@ description: provide a convenient, centralized place to store IP addresses. Rackspace monitoring module flow | *rax_mon_entity* -> rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm -version_added: "1.8.2" +version_added: "1.9" options: label: description: diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py index 74c4319255b..475eb345f51 100644 --- a/cloud/rackspace/rax_mon_notification.py +++ b/cloud/rackspace/rax_mon_notification.py @@ -25,7 +25,7 @@ description: channel that can be used to communicate alarms, such as email, webhooks, or PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm -version_added: "1.8.2" +version_added: "1.9" options: state: description: diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py index c8d4d215292..b81b00f7d18 100644 --- a/cloud/rackspace/rax_mon_notification_plan.py +++ b/cloud/rackspace/rax_mon_notification_plan.py @@ -26,7 +26,7 @@ description: associating existing rax_mon_notifications with severity levels. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm -version_added: "1.8.2" +version_added: "1.9" options: state: description: From 205e4e5530699809713fdcada0ee477abb68fb50 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 23 Feb 2015 14:25:51 -0600 Subject: [PATCH 019/157] Use required=True and choices=[]. 
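Declaring required=True and choices=[...] in the argument spec moves validation out of each module body and into AnsibleModule, which enforces these constraints before the main logic runs; the follow-up commit in this series then drops the hand-written per-parameter checks. A hedged before/after sketch, with parameter names loosely modelled on the rax_mon_* modules:

    # Hand-written validation of the kind the later commit removes.
    def validate_by_hand(module, label, entity_id):
        required_attrs = {'label': label, 'entity_id': entity_id}
        for key, value in required_attrs.items():
            if not value:
                module.fail_json(msg='%s is required' % key)

    # The same constraints expressed declaratively: AnsibleModule enforces
    # them during argument parsing, so the loop above becomes unnecessary.
    def main():
        module = AnsibleModule(
            argument_spec=dict(
                state=dict(default='present', choices=['present', 'absent']),
                label=dict(required=True),
                entity_id=dict(required=True),
            ),
        )
        module.exit_json(changed=False,
                         label=module.params['label'],
                         entity_id=module.params['entity_id'])

    from ansible.module_utils.basic import *

    main()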
--- cloud/rackspace/rax_mon_alarm.py | 10 +++++----- cloud/rackspace/rax_mon_check.py | 8 ++++---- cloud/rackspace/rax_mon_entity.py | 4 ++-- cloud/rackspace/rax_mon_notification.py | 8 ++++---- cloud/rackspace/rax_mon_notification_plan.py | 4 ++-- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py index aa742d02bd8..f4d2a9398a5 100644 --- a/cloud/rackspace/rax_mon_alarm.py +++ b/cloud/rackspace/rax_mon_alarm.py @@ -198,11 +198,11 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - state=dict(default='present'), - label=dict(), - entity_id=dict(), - check_id=dict(), - notification_plan_id=dict(), + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + entity_id=dict(required=True), + check_id=dict(required=True), + notification_plan_id=dict(required=True), criteria=dict(), disabled=dict(type='bool', default=False), metadata=dict(type='dict') diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py index 3f86da93ab6..27798e6cd5a 100644 --- a/cloud/rackspace/rax_mon_check.py +++ b/cloud/rackspace/rax_mon_check.py @@ -271,9 +271,9 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - entity_id=dict(), - label=dict(), - check_type=dict(), + entity_id=dict(required=True), + label=dict(required=True), + check_type=dict(required=True), monitoring_zones_poll=dict(), target_hostname=dict(), target_alias=dict(), @@ -282,7 +282,7 @@ def main(): metadata=dict(type='dict', default={}), period=dict(type='int'), timeout=dict(type='int'), - state=dict(default='present') + state=dict(default='present', choices=['present', 'absent']) ) ) diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py index 9d20252b0e5..b1bd13c61ad 100644 --- a/cloud/rackspace/rax_mon_entity.py +++ b/cloud/rackspace/rax_mon_entity.py @@ -161,8 +161,8 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - state=dict(default='present'), - label=dict(), + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), agent_id=dict(), named_ip_addresses=dict(type='dict', default={}), metadata=dict(type='dict', default={}) diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py index 475eb345f51..6962b14b3e6 100644 --- a/cloud/rackspace/rax_mon_notification.py +++ b/cloud/rackspace/rax_mon_notification.py @@ -154,10 +154,10 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - state=dict(default='present'), - label=dict(), - notification_type=dict(), - details=dict(type='dict', default={}) + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']), + details=dict(required=True, type='dict') ) ) diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py index b81b00f7d18..1bb5052c8f2 100644 --- a/cloud/rackspace/rax_mon_notification_plan.py +++ b/cloud/rackspace/rax_mon_notification_plan.py @@ -151,8 +151,8 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - state=dict(default='present'), - label=dict(), + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), critical_state=dict(type='list'), warning_state=dict(type='list'), ok_state=dict(type='list') From 
c0549335135e33f1cbd49575e8e7428647a06e28 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 23 Feb 2015 14:33:02 -0600 Subject: [PATCH 020/157] Eliminate redundant module argument checks. --- cloud/rackspace/rax_mon_alarm.py | 15 +-------------- cloud/rackspace/rax_mon_check.py | 10 ---------- cloud/rackspace/rax_mon_entity.py | 6 +----- cloud/rackspace/rax_mon_notification.py | 13 +------------ cloud/rackspace/rax_mon_notification_plan.py | 7 +------ 5 files changed, 4 insertions(+), 47 deletions(-) diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py index f4d2a9398a5..f9b97bc8dd1 100644 --- a/cloud/rackspace/rax_mon_alarm.py +++ b/cloud/rackspace/rax_mon_alarm.py @@ -105,17 +105,6 @@ except ImportError: def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria, disabled, metadata): - # Verify the presence of required attributes. - - required_attrs = { - "label": label, "entity_id": entity_id, "check_id": check_id, - "notification_plan_id": notification_plan_id - } - - for (key, value) in required_attrs.iteritems(): - if not value: - module.fail_json(msg=('%s is required for rax_mon_alarm' % key)) - if len(label) < 1 or len(label) > 255: module.fail_json(msg='label must be between 1 and 255 characters long') @@ -173,12 +162,10 @@ def alarm(module, state, label, entity_id, check_id, notification_plan_id, crite criteria=criteria, disabled=disabled, label=label, metadata=metadata) changed = True - elif state == 'absent': + else: for a in existing: a.delete() changed = True - else: - module.fail_json(msg='state must be either present or absent.') if alarm: alarm_dict = { diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py index 27798e6cd5a..101efd3c858 100644 --- a/cloud/rackspace/rax_mon_check.py +++ b/cloud/rackspace/rax_mon_check.py @@ -141,16 +141,6 @@ def cloud_check(module, state, entity_id, label, check_type, monitoring_zones_poll, target_hostname, target_alias, details, disabled, metadata, period, timeout): - # Verify the presence of required attributes. - - required_attrs = { - "entity_id": entity_id, "label": label, "check_type": check_type - } - - for (key, value) in required_attrs.iteritems(): - if not value: - module.fail_json(msg=('%s is required for rax_mon_check' % key)) - # Coerce attributes. if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list): diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py index b1bd13c61ad..5f82ff9c524 100644 --- a/cloud/rackspace/rax_mon_entity.py +++ b/cloud/rackspace/rax_mon_entity.py @@ -83,8 +83,6 @@ except ImportError: def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata): - if not label: - module.fail_json(msg='label is required for rax_mon_entity') if len(label) < 1 or len(label) > 255: module.fail_json(msg='label must be between 1 and 255 characters long') @@ -139,13 +137,11 @@ def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, ip_addresses=named_ip_addresses, metadata=metadata) changed = True - elif state == 'absent': + else: # Delete the existing Entities. 
for e in existing: e.delete() changed = True - else: - module.fail_json(msg='state must be present or absent') if entity: entity_dict = { diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py index 6962b14b3e6..8a21b088c5e 100644 --- a/cloud/rackspace/rax_mon_notification.py +++ b/cloud/rackspace/rax_mon_notification.py @@ -76,18 +76,9 @@ except ImportError: def notification(module, state, label, notification_type, details): - if not label: - module.fail_json(msg='label is required for rax_mon_notification') - if len(label) < 1 or len(label) > 255: module.fail_json(msg='label must be between 1 and 255 characters long') - if not notification_type: - module.fail_json(msg='you must provide a notification_type') - - if not details: - module.fail_json(msg='notification details are required') - changed = False notification = None @@ -132,12 +123,10 @@ def notification(module, state, label, notification_type, details): notification = cm.create_notification(notification_type, label=label, details=details) changed = True - elif state == 'absent': + else: for n in existing: n.delete() changed = True - else: - module.fail_json(msg='state must be either "present" or "absent"') if notification: notification_dict = { diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py index 1bb5052c8f2..05b89b2cfb3 100644 --- a/cloud/rackspace/rax_mon_notification_plan.py +++ b/cloud/rackspace/rax_mon_notification_plan.py @@ -80,9 +80,6 @@ except ImportError: def notification_plan(module, state, label, critical_state, warning_state, ok_state): - if not label: - module.fail_json(msg='label is required for rax_mon_notification_plan') - if len(label) < 1 or len(label) > 255: module.fail_json(msg='label must be between 1 and 255 characters long') @@ -128,12 +125,10 @@ def notification_plan(module, state, label, critical_state, warning_state, ok_st warning_state=warning_state, ok_state=ok_state) changed = True - elif state == 'absent': + else: for np in existing: np.delete() changed = True - else: - module.fail_json(msg='state must be either "present" or "absent"') if notification_plan: notification_plan_dict = { From c6d56809670b8e486a7f9e420edce178dda8f543 Mon Sep 17 00:00:00 2001 From: schwartzmx Date: Tue, 14 Apr 2015 23:51:02 -0500 Subject: [PATCH 021/157] fixes unzip bug for zip files, thanks to @ryanwalls - also fixes possible import errors, and switches to use Start-Process on install to correctly wait --- windows/win_unzip.ps1 | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1 index f31a6273a39..8e6db762fe1 100644 --- a/windows/win_unzip.ps1 +++ b/windows/win_unzip.ps1 @@ -33,7 +33,7 @@ If ($params.src) { Fail-Json $result "src file: $src does not exist." 
} - $ext = [System.IO.Path]::GetExtension($dest) + $ext = [System.IO.Path]::GetExtension($src) } Else { Fail-Json $result "missing required argument: src" @@ -93,7 +93,7 @@ Else { If (-Not ($list -match "PSCX")) { # Try install with chocolatey Try { - cinst -force PSCX + cinst -force PSCX -y $choco = $true } Catch { @@ -109,9 +109,7 @@ Else { Fail-Json $result "Error downloading PSCX from $url and saving as $dest" } Try { - msiexec.exe /i $msi /qb - # Give it a chance to install, so that it can be imported - sleep 10 + Start-Process -FilePath msiexec.exe -ArgumentList "/i $msi /qb" -Verb Runas -PassThru -Wait | out-null } Catch { Fail-Json $result "Error installing $msi" @@ -127,7 +125,12 @@ Else { # Import Try { If ($installed) { - Import-Module 'C:\Program Files (x86)\Powershell Community Extensions\pscx3\pscx\pscx.psd1' + Try { + Import-Module 'C:\Program Files (x86)\Powershell Community Extensions\pscx3\pscx\pscx.psd1' + } + Catch { + Import-Module PSCX + } } Else { Import-Module PSCX From 91483bdd6b9a3dd0c0ad047a1209801068afcb27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Henrik=20Wallstro=CC=88m?= Date: Fri, 24 Apr 2015 10:48:02 +0200 Subject: [PATCH 022/157] Modules to manage IIS Wraps the Web Server Administration module for PowerShell into Ansible modules. --- windows/win_iis_virtualdirectory.ps1 | 128 +++++++++++++++++++ windows/win_iis_virtualdirectory.py | 67 ++++++++++ windows/win_iis_webapplication.ps1 | 132 ++++++++++++++++++++ windows/win_iis_webapplication.py | 68 ++++++++++ windows/win_iis_webapppool.ps1 | 112 +++++++++++++++++ windows/win_iis_webapppool.py | 112 +++++++++++++++++ windows/win_iis_webbinding.ps1 | 138 +++++++++++++++++++++ windows/win_iis_webbinding.py | 143 +++++++++++++++++++++ windows/win_iis_website.ps1 | 179 +++++++++++++++++++++++++++ windows/win_iis_website.py | 133 ++++++++++++++++++++ 10 files changed, 1212 insertions(+) create mode 100644 windows/win_iis_virtualdirectory.ps1 create mode 100644 windows/win_iis_virtualdirectory.py create mode 100644 windows/win_iis_webapplication.ps1 create mode 100644 windows/win_iis_webapplication.py create mode 100644 windows/win_iis_webapppool.ps1 create mode 100644 windows/win_iis_webapppool.py create mode 100644 windows/win_iis_webbinding.ps1 create mode 100644 windows/win_iis_webbinding.py create mode 100644 windows/win_iis_website.ps1 create mode 100644 windows/win_iis_website.py diff --git a/windows/win_iis_virtualdirectory.ps1 b/windows/win_iis_virtualdirectory.ps1 new file mode 100644 index 00000000000..3f2ab692b42 --- /dev/null +++ b/windows/win_iis_virtualdirectory.ps1 @@ -0,0 +1,128 @@ +#!powershell +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# Site +$site = Get-Attr $params "site" $FALSE; +If ($site -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: site"; +} + +# Application +$application = Get-Attr $params "application" $FALSE; + +# State parameter +$state = Get-Attr $params "state" "present"; +If (($state -ne 'present') -and ($state -ne 'absent')) { + Fail-Json $result "state is '$state'; must be 'present' or 'absent'" +} + +# Path parameter +$physical_path = Get-Attr $params "physical_path" $FALSE; + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) { + Import-Module WebAdministration +} + +# Result +$result = New-Object psobject @{ + directory = New-Object psobject + changed = $false +}; + +# Construct path +$directory_path = if($application) { + "IIS:\Sites\$($site)\$($application)\$($name)" +} else { + "IIS:\Sites\$($site)\$($name)" +} + +# Directory info +$directory = Get-WebVirtualDirectory -Site $site -Name $name + +try { + # Add directory + If(($state -eq 'present') -and (-not $directory)) { + If ($physical_path -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required arguments: physical_path" + } + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $directory_parameters = New-Object psobject @{ + Site = $site + Name = $name + PhysicalPath = $physical_path + }; + + If ($application) { + $directory_parameters.Application = $application + } + + $directory = New-WebVirtualDirectory @directory_parameters -Force + $result.changed = $true + } + + # Remove directory + If ($state -eq 'absent' -and $directory) { + Remove-Item $directory_path + $result.changed = $true + } + + $directory = Get-WebVirtualDirectory -Site $site -Name $name + If($directory) { + + # Change Physical Path if needed + if($physical_path) { + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $vdir_folder = Get-Item $directory.PhysicalPath + $folder = Get-Item $physical_path + If($folder.FullName -ne $vdir_folder.FullName) { + Set-ItemProperty $directory_path -name physicalPath -value $physical_path + $result.changed = $true + } + } + } +} catch { + Fail-Json $result $_.Exception.Message +} + +# Result +$directory = Get-WebVirtualDirectory -Site $site -Name $name +$result.directory = New-Object psobject @{ + PhysicalPath = $directory.PhysicalPath +} + +Exit-Json $result diff --git a/windows/win_iis_virtualdirectory.py b/windows/win_iis_virtualdirectory.py new file mode 100644 index 00000000000..bbedfbbb4ab --- /dev/null +++ b/windows/win_iis_virtualdirectory.py @@ -0,0 +1,67 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: win_iis_virtualdirectory +version_added: "1.9" +short_description: Configures a IIS virtual directories. +description: + - Creates, Removes and configures a IIS Web site +options: + name: + description: + - The name of the virtual directory to create. + required: true + default: null + aliases: [] + state: + description: + - + choices: + - absent + - present + required: false + default: null + aliases: [] + site: + description: + - The site name under which the virtual directory is created or exists. + required: false + default: null + aliases: [] + application: + description: + - The application under which the virtual directory is created or exists. + required: false + default: null + aliases: [] + physical_path: + description: + - The physical path to the folder in which the new virtual directory is created. The specified folder must already exist. + required: false + default: null + aliases: [] +author: Henrik Wallström +''' + +EXAMPLES = ''' + +''' diff --git a/windows/win_iis_webapplication.ps1 b/windows/win_iis_webapplication.ps1 new file mode 100644 index 00000000000..e576dd5081c --- /dev/null +++ b/windows/win_iis_webapplication.ps1 @@ -0,0 +1,132 @@ +#!powershell + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# Site +$site = Get-Attr $params "site" $FALSE; +If ($site -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: site"; +} + +# State parameter +$state = Get-Attr $params "state" "present"; +$state.ToString().ToLower(); +If (($state -ne 'present') -and ($state -ne 'absent')) { + Fail-Json $result "state is '$state'; must be 'present' or 'absent'" +} + +# Path parameter +$physical_path = Get-Attr $params "physical_path" $FALSE; + +# Application Pool Parameter +$application_pool = Get-Attr $params "application_pool" $FALSE; + + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) { + Import-Module WebAdministration +} + +# Result +$result = New-Object psobject @{ + application = New-Object psobject + changed = $false +}; + +# Application info +$application = Get-WebApplication -Site $site -Name $name + +try { + # Add application + If(($state -eq 'present') -and (-not $application)) { + If ($physical_path -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required arguments: physical_path" + } + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $application_parameters = New-Object psobject @{ + Site = $site + Name = $name + PhysicalPath = $physical_path + }; + + If ($application_pool) { + $application_parameters.ApplicationPool = $application_pool + } + + $application = New-WebApplication @application_parameters -Force + $result.changed = $true + + } + + # Remove application + if ($state -eq 'absent' -and $application) { + $application = Remove-WebApplication -Site $site -Name $name + $result.changed = $true + } + + $application = Get-WebApplication -Site $site -Name $name + If($application) { + + # Change Physical Path if needed + if($physical_path) { + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $app_folder = Get-Item $application.PhysicalPath + $folder = Get-Item $physical_path + If($folder.FullName -ne $app_folder.FullName) { + Set-ItemProperty "IIS:\Sites\$($site)\$($name)" -name physicalPath -value $physical_path + $result.changed = $true + } + } + + # Change Application Pool if needed + if($application_pool) { + If($application_pool -ne $application.applicationPool) { + Set-ItemProperty "IIS:\Sites\$($site)\$($name)" -name applicationPool -value $application_pool + $result.changed = $true + } + } + } +} catch { + Fail-Json $result $_.Exception.Message +} + +# Result +$application = Get-WebApplication -Site $site -Name $name +$result.application = New-Object psobject @{ + PhysicalPath = $application.PhysicalPath + ApplicationPool = $application.applicationPool +} + +Exit-Json $result diff --git a/windows/win_iis_webapplication.py b/windows/win_iis_webapplication.py new file mode 100644 index 00000000000..d8a59b66054 --- /dev/null +++ b/windows/win_iis_webapplication.py @@ -0,0 +1,68 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the 
License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: win_iis_website +version_added: "1.9" +short_description: Configures a IIS Web application. +description: + - Creates, Removes and configures a IIS Web applications +options: + name: + description: + - Name of the Web applicatio + required: true + default: null + aliases: [] + site: + description: + - Name of the site on which the application is created. + required: true + default: null + aliases: [] + state: + description: + - State of the web application + choices: + - present + - absent + required: false + default: null + aliases: [] + physical_path: + description: + - The physical path on the remote host to use for the new applicatiojn. The specified folder must already exist. + required: false + default: null + aliases: [] + application_pool: + description: + - The application pool in which the new site executes. + required: false + default: null + aliases: [] +author: Henrik Wallström +''' + +EXAMPLES = ''' +$ ansible -i hosts -m win_iis_webapplication -a "name=api site=acme physical_path=c:\\apps\\acme\\api" host + +''' diff --git a/windows/win_iis_webapppool.ps1 b/windows/win_iis_webapppool.ps1 new file mode 100644 index 00000000000..2ed369e4a3f --- /dev/null +++ b/windows/win_iis_webapppool.ps1 @@ -0,0 +1,112 @@ +#!powershell + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# State parameter +$state = Get-Attr $params "state" $FALSE; +$valid_states = ('started', 'restarted', 'stopped', 'absent'); +If (($state -Ne $FALSE) -And ($state -NotIn $valid_states)) { + Fail-Json $result "state is '$state'; must be $($valid_states)" +} + +# Attributes parameter - Pipe separated list of attributes where +# keys and values are separated by comma (paramA:valyeA|paramB:valueB) +$attributes = @{}; +If ($params.attributes) { + $params.attributes -split '\|' | foreach { + $key, $value = $_ -split "\:"; + $attributes.Add($key, $value); + } +} + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $NULL){ + Import-Module WebAdministration +} + +# Result +$result = New-Object psobject @{ + changed = $FALSE + attributes = $attributes +}; + +# Get pool +$pool = Get-Item IIS:\AppPools\$name + +try { + # Add + if (-not $pool -and $state -in ('started', 'stopped', 'restarted')) { + New-WebAppPool $name + $result.changed = $TRUE + } + + # Remove + if ($pool -and $state -eq 'absent') { + Remove-WebAppPool $name + $result.changed = $TRUE + } + + $pool = Get-Item IIS:\AppPools\$name + if($pool) { + # Set properties + $attributes.GetEnumerator() | foreach { + $newParameter = $_; + $currentParameter = Get-ItemProperty ("IIS:\AppPools\" + $name) $newParameter.Key + if(-not $currentParameter -or ($currentParameter.Value -as [String]) -ne $newParameter.Value) { + Set-ItemProperty ("IIS:\AppPools\" + $name) $newParameter.Key $newParameter.Value + $result.changed = $TRUE + } + } + + # Set run state + if (($state -eq 'stopped') -and ($pool.State -eq 'Started')) { + Stop-WebAppPool -Name $name -ErrorAction Stop + $result.changed = $TRUE + } + if ((($state -eq 'started') -and ($pool.State -eq 'Stopped')) -or ($state -eq 'restarted')) { + Start-WebAppPool -Name $name -ErrorAction Stop + $result.changed = $TRUE + } + } +} catch { + Fail-Json $result $_.Exception.Message +} + +# Result +$pool = Get-Item IIS:\AppPools\$name +$result.info = @{ + name = $pool.Name + state = $pool.State + attributes = New-Object psobject @{} +}; + +$pool.Attributes | ForEach { $result.info.attributes.Add($_.Name, $_.Value)}; + +Exit-Json $result diff --git a/windows/win_iis_webapppool.py b/windows/win_iis_webapppool.py new file mode 100644 index 00000000000..320fe07f637 --- /dev/null +++ b/windows/win_iis_webapppool.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION = ''' +--- +module: win_iis_webapppool +version_added: "1.9" +short_description: Configures a IIS Web Application Pool. 
+description:
+    - Creates, removes and configures an IIS Web Application Pool.
+options:
+  name:
+    description:
+      - Name of the application pool.
+    required: true
+    default: null
+    aliases: []
+  state:
+    description:
+      - State of the application pool.
+    choices:
+      - absent
+      - stopped
+      - started
+      - restarted
+    required: false
+    default: null
+    aliases: []
+  attributes:
+    description:
+      - Application Pool attributes from string where attributes are separated by a pipe and attribute name/values by a colon. Ex. "foo:1|bar:2"
+    required: false
+    default: null
+    aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+# This returns information about an existing application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='DefaultAppPool'" windows
+host | success >> {
+    "attributes": {},
+    "changed": false,
+    "info": {
+        "attributes": {
+            "CLRConfigFile": "",
+            "applicationPoolSid": "S-1-5-82-3006700770-424185619-1745488364-794895919-4004696415",
+            "autoStart": true,
+            "enable32BitAppOnWin64": false,
+            "enableConfigurationOverride": true,
+            "managedPipelineMode": 0,
+            "managedRuntimeLoader": "webengine4.dll",
+            "managedRuntimeVersion": "v4.0",
+            "name": "DefaultAppPool",
+            "passAnonymousToken": true,
+            "queueLength": 1000,
+            "startMode": 0,
+            "state": 1
+        },
+        "name": "DefaultAppPool",
+        "state": "Started"
+    }
+}
+
+# This creates a new application pool in 'Started' state
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=started" windows
+
+# This stops an application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=stopped" windows
+
+# This restarts an application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=restarted" windows
+
+# This changes application pool attributes without touching state
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows
+
+# This creates an application pool and sets attributes
+$ ansible -i inventory -m win_iis_webapppool -a "name='AnotherAppPool' state=started attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows
+
+
+# Playbook example
+---
+
+- name: App Pool with .NET 4.0
+  win_iis_webapppool:
+    name: 'AppPool'
+    state: started
+    attributes: managedRuntimeVersion:v4.0
+  register: webapppool
+
+'''
diff --git a/windows/win_iis_webbinding.ps1 b/windows/win_iis_webbinding.ps1
new file mode 100644
index 00000000000..bdff43fc63c
--- /dev/null
+++ b/windows/win_iis_webbinding.ps1
@@ -0,0 +1,138 @@
+#!powershell
+
+# (c) 2015, Henrik Wallström
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+ + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# State parameter +$state = Get-Attr $params "state" $FALSE; +$valid_states = ($FALSE, 'present', 'absent'); +If ($state -NotIn $valid_states) { + Fail-Json $result "state is '$state'; must be $($valid_states)" +} + +$binding_parameters = New-Object psobject @{ + Name = $name +}; + +If ($params.host_header) { + $binding_parameters.HostHeader = $params.host_header +} + +If ($params.protocol) { + $binding_parameters.Protocol = $params.protocol +} + +If ($params.port) { + $binding_parameters.Port = $params.port +} + +If ($params.ip) { + $binding_parameters.IPAddress = $params.ip +} + +$certificateHash = Get-Attr $params "certificate_hash" $FALSE; +$certificateStoreName = Get-Attr $params "certificate_store_name" "MY"; + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null){ + Import-Module WebAdministration +} + +function Create-Binding-Info { + return New-Object psobject @{ + "bindingInformation" = $args[0].bindingInformation + "certificateHash" = $args[0].certificateHash + "certificateStoreName" = $args[0].certificateStoreName + "isDsMapperEnabled" = $args[0].isDsMapperEnabled + "protocol" = $args[0].protocol + "sslFlags" = $args[0].sslFlags + } +} + +# Result +$result = New-Object psobject @{ + changed = $false + parameters = $binding_parameters + matched = @() + removed = @() + added = @() +}; + +# Get bindings matching parameters +$curent_bindings = Get-WebBinding @binding_parameters +$curent_bindings | Foreach { + $result.matched += Create-Binding-Info $_ +} + +try { + # Add + if (-not $curent_bindings -and $state -eq 'present') { + New-WebBinding @binding_parameters -Force + + # Select certificat + if($certificateHash -ne $FALSE) { + + $ip = $binding_parameters.IPAddress + if((!$ip) -or ($ip -eq "*")) { + $ip = "0.0.0.0" + } + + $port = $binding_parameters.Port + if(!$port) { + $port = 443 + } + + $result.port = $port + $result.ip = $ip + + Push-Location IIS:\SslBindings\ + Get-Item Cert:\LocalMachine\$certificateStoreName\$certificateHash | New-Item "$($ip)!$($port)" + Pop-Location + } + + $result.added += Create-Binding-Info (Get-WebBinding @binding_parameters) + $result.changed = $true + } + + # Remove + if ($curent_bindings -and $state -eq 'absent') { + $curent_bindings | foreach { + Remove-WebBinding -InputObject $_ + $result.removed += Create-Binding-Info $_ + } + $result.changed = $true + } + + +} +catch { + Fail-Json $result $_.Exception.Message +} + +Exit-Json $result diff --git a/windows/win_iis_webbinding.py b/windows/win_iis_webbinding.py new file mode 100644 index 00000000000..0cc5da158bf --- /dev/null +++ b/windows/win_iis_webbinding.py @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+
+DOCUMENTATION = '''
+---
+module: win_iis_webbinding
+version_added: "1.9"
+short_description: Configures an IIS Web site binding.
+description:
+    - Creates, removes and configures a binding to an existing IIS Web site.
+options:
+  name:
+    description:
+      - Name of the web site.
+    required: true
+    default: null
+    aliases: []
+  state:
+    description:
+      - State of the binding.
+    choices:
+      - present
+      - absent
+    required: false
+    default: null
+    aliases: []
+  port:
+    description:
+      - The port to bind to / use for the new site.
+    required: false
+    default: null
+    aliases: []
+  ip:
+    description:
+      - The IP address to bind to / use for the new site.
+    required: false
+    default: null
+    aliases: []
+  host_header:
+    description:
+      - The host header to bind to / use for the new site.
+    required: false
+    default: null
+    aliases: []
+  protocol:
+    description:
+      - The protocol to be used for the Web binding (usually HTTP, HTTPS, or FTP).
+    required: false
+    default: null
+    aliases: []
+  certificate_hash:
+    description:
+      - Certificate hash for the SSL binding. The certificate hash is the unique identifier for the certificate.
+    required: false
+    default: null
+    aliases: []
+  certificate_store_name:
+    description:
+      - Name of the certificate store where the certificate for the binding is located.
+    required: false
+    default: "My"
+    aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+# This will return binding information for an existing host
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site'" windows
+host | success >> {
+    "added": [],
+    "changed": false,
+    "matched": [
+        {
+            "bindingInformation": "*:80:",
+            "certificateHash": "",
+            "certificateStoreName": "",
+            "isDsMapperEnabled": false,
+            "protocol": "http",
+            "sslFlags": 0
+        }
+    ],
+    "parameters": {
+        "Name": "Default Web Site"
+    },
+    "removed": []
+}
+
+# This will return the HTTPS binding information for an existing host
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https" windows
+
+# This will return the binding information for port 9090, if any
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090" windows
+
+# This will add a HTTP binding on port 9090
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090 state=present" windows
+
+# This will remove the HTTP binding on port 9090
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090 state=absent" windows
+
+# This will add a HTTPS binding
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https state=present" windows
+
+# This will add a HTTPS binding and select the certificate to use
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https certificate_hash=B0D0FA8408FC67B230338FCA584D03792DA73F4C" windows
+
+
+# Playbook example
+---
+
+- name: Website http/https bindings
+  win_iis_webbinding:
+    name: "Default Web Site"
+    protocol: https
+    port: 443
+    certificate_hash: "D1A3AF8988FD32D1A3AF8988FD323792DA73F4C"
+    state: present
+  when: monitor_use_https
+
+'''
diff --git a/windows/win_iis_website.ps1 b/windows/win_iis_website.ps1
new file mode 100644
index
00000000000..bba1e941142 --- /dev/null +++ b/windows/win_iis_website.ps1 @@ -0,0 +1,179 @@ +#!powershell + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# State parameter +$state = Get-Attr $params "state" $FALSE; +$state.ToString().ToLower(); +If (($state -ne $FALSE) -and ($state -ne 'started') -and ($state -ne 'stopped') -and ($state -ne 'restarted') -and ($state -ne 'absent')) { + Fail-Json (New-Object psobject) "state is '$state'; must be 'started', 'restarted', 'stopped' or 'absent'" +} + +# Path parameter +$physical_path = Get-Attr $params "physical_path" $FALSE; + +# Application Pool Parameter +$application_pool = Get-Attr $params "application_pool" $FALSE; + +# Binding Parameters +$bind_port = Get-Attr $params "port" $FALSE; +$bind_ip = Get-Attr $params "ip" $FALSE; +$bind_hostname = Get-Attr $params "hostname" $FALSE; +$bind_ssl = Get-Attr $params "ssl" $FALSE; + +# Custom site Parameters from string where properties +# are seperated by a pipe and property name/values by colon. +# Ex. 
"foo:1|bar:2" +$parameters = Get-Attr $params "parameters" $null; +if($parameters -ne $null) { + $parameters = @($parameters -split '\|' | ForEach { + return ,($_ -split "\:", 2); + }) +} + + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) { + Import-Module WebAdministration +} + +# Result +$result = New-Object psobject @{ + site = New-Object psobject + changed = $false +}; + +# Site info +$site = Get-Website -Name $name + +Try { + # Add site + If(($state -ne 'absent') -and (-not $site)) { + If ($physical_path -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required arguments: physical_path" + } + ElseIf (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $site_parameters = New-Object psobject @{ + Name = $name + PhysicalPath = $physical_path + }; + + If ($application_pool) { + $site_parameters.ApplicationPool = $application_pool + } + + If ($bind_port) { + $site_parameters.Port = $bind_port + } + + If ($bind_ip) { + $site_parameters.IPAddress = $bind_ip + } + + If ($bind_hostname) { + $site_parameters.HostHeader = $bind_hostname + } + + $site = New-Website @site_parameters -Force + $result.changed = $true + } + + # Remove site + If ($state -eq 'absent' -and $site) { + $site = Remove-Website -Name $name + $result.changed = $true + } + + $site = Get-Website -Name $name + If($site) { + # Change Physical Path if needed + if($physical_path) { + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $folder = Get-Item $physical_path + If($folder.FullName -ne $site.PhysicalPath) { + Set-ItemProperty "IIS:\Sites\$($site.Name)" -name physicalPath -value $folder.FullName + $result.changed = $true + } + } + + # Change Application Pool if needed + if($application_pool) { + If($application_pool -ne $site.applicationPool) { + Set-ItemProperty "IIS:\Sites\$($site.Name)" -name applicationPool -value $application_pool + $result.changed = $true + } + } + + # Set properties + if($parameters) { + $parameters | foreach { + $parameter_value = Get-ItemProperty "IIS:\Sites\$($site.Name)" $_[0] + if((-not $parameter_value) -or ($parameter_value.Value -as [String]) -ne $_[1]) { + Set-ItemProperty "IIS:\Sites\$($site.Name)" $_[0] $_[1] + $result.changed = $true + } + } + } + + # Set run state + if (($state -eq 'stopped') -and ($site.State -eq 'Started')) + { + Stop-Website -Name $name -ErrorAction Stop + $result.changed = $true + } + if ((($state -eq 'started') -and ($site.State -eq 'Stopped')) -or ($state -eq 'restarted')) + { + Start-Website -Name $name -ErrorAction Stop + $result.changed = $true + } + } +} +Catch +{ + Fail-Json (New-Object psobject) $_.Exception.Message +} + +$site = Get-Website -Name $name +$result.site = New-Object psobject @{ + Name = $site.Name + ID = $site.ID + State = $site.State + PhysicalPath = $site.PhysicalPath + ApplicationPool = $site.applicationPool + Bindings = @($site.Bindings.Collection | ForEach-Object { $_.BindingInformation }) +} + + +Exit-Json $result diff --git a/windows/win_iis_website.py b/windows/win_iis_website.py new file mode 100644 index 00000000000..0893b11c2bd --- /dev/null +++ b/windows/win_iis_website.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of 
the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: win_iis_website +version_added: "1.9" +short_description: Configures a IIS Web site. +description: + - Creates, Removes and configures a IIS Web site +options: + name: + description: + - Names of web site + required: true + default: null + aliases: [] + state: + description: + - State of the web site + choices: + - started + - restarted + - stopped + - absent + required: false + default: null + aliases: [] + physical_path: + description: + - The physical path on the remote host to use for the new site. The specified folder must already exist. + required: false + default: null + aliases: [] + application_pool: + description: + - The application pool in which the new site executes. + required: false + default: null + aliases: [] + port: + description: + - The port to bind to / use for the new site. + required: false + default: null + aliases: [] + ip: + description: + - The IP address to bind to / use for the new site. + required: false + default: null + aliases: [] + hostname: + description: + - The host header to bind to / use for the new site. + required: false + default: null + aliases: [] + ssl: + description: + - Enables HTTPS binding on the site.. + required: false + default: null + aliases: [] + parameters: + description: + - Custom site Parameters from string where properties are seperated by a pipe and property name/values by colon Ex. "foo:1|bar:2" + required: false + default: null + aliases: [] +author: Henrik Wallström +''' + +EXAMPLES = ''' +# This return information about an existing host +$ ansible -i vagrant-inventory -m win_iis_website -a "name='Default Web Site'" window +host | success >> { + "changed": false, + "site": { + "ApplicationPool": "DefaultAppPool", + "Bindings": [ + "*:80:" + ], + "ID": 1, + "Name": "Default Web Site", + "PhysicalPath": "%SystemDrive%\\inetpub\\wwwroot", + "State": "Stopped" + } +} + +# This stops an existing site. +$ ansible -i hosts -m win_iis_website -a "name='Default Web Site' state=stopped" host + +# This creates a new site. +$ ansible -i hosts -m win_iis_website -a "name=acme physical_path=c:\\sites\\acme" host + +# Change logfile . 
+$ ansible -i hosts -m win_iis_website -a "name=acme physical_path=c:\\sites\\acme" host + + +# Playbook example +--- + +- name: Acme IIS site + win_iis_website: + name: "Acme" + state: started + port: 80 + ip: 127.0.0.1 + hostname: acme.local + application_pool: "acme" + physical_path: 'c:\\sites\\acme' + parameters: 'logfile.directory:c:\\sites\\logs' + register: website + +''' From 7c199cad252468c90a512bf735336285e893a200 Mon Sep 17 00:00:00 2001 From: Alan Loi Date: Sat, 16 May 2015 21:53:27 +1000 Subject: [PATCH 023/157] Add dynamodb_table module --- cloud/amazon/dynamodb_table | 261 ++++++++++++++++++++++++++++++++++++ 1 file changed, 261 insertions(+) create mode 100644 cloud/amazon/dynamodb_table diff --git a/cloud/amazon/dynamodb_table b/cloud/amazon/dynamodb_table new file mode 100644 index 00000000000..7a200a3b271 --- /dev/null +++ b/cloud/amazon/dynamodb_table @@ -0,0 +1,261 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: dynamodb_table +short_description: Create, update or delete AWS Dynamo DB tables. +description: + - Create or delete AWS Dynamo DB tables. + - Can update the provisioned throughput on existing tables. + - Returns the status of the specified table. +author: Alan Loi (@loia) +requirements: + - "boto >= 2.13.2" +options: + state: + description: + - Create or delete the table + required: false + choices: ['present', 'absent'] + default: 'present' + name: + description: + - Name of the table. + required: true + hash_key_name: + description: + - Name of the hash key. + - Required when state=present. + required: false + hash_key_type: + description: + - Type of the hash key. + required: false + choices: ['STRING', 'NUMBER', 'BINARY'] + default: 'STRING' + range_key_name: + description: + - Name of the range key. + required: false + range_key_type: + description: + - Type of the range key. + required: false + choices: ['STRING', 'NUMBER', 'BINARY'] + default: 'STRING' + read_capacity: + description: + - Read throughput capacity (units) to provision. + required: false + default: 1 + write_capacity: + description: + - Write throughput capacity (units) to provision. + required: false + default: 1 + region: + description: + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. 
+ required: false + aliases: ['aws_region', 'ec2_region'] + +extends_documentation_fragment: aws +""" + +EXAMPLES = ''' +# Create dynamo table with hash and range primary key +- dynamodb_table: + name: my-table + region: us-east-1 + hash_key_name: id + hash_key_type: STRING + range_key_name: create_time + range_key_type: NUMBER + read_capacity: 2 + write_capacity: 2 + +# Update capacity on existing dynamo table +- dynamodb_table: + name: my-table + region: us-east-1 + read_capacity: 10 + write_capacity: 10 + +# Delete dynamo table +- dynamodb_table: + name: my-table + region: us-east-1 + state: absent +''' + +try: + import boto + import boto.dynamodb2 + from boto.dynamodb2.table import Table + from boto.dynamodb2.fields import HashKey, RangeKey + from boto.dynamodb2.types import STRING, NUMBER, BINARY + from boto.exception import BotoServerError, JSONResponseError + +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + + +DYNAMO_TYPE_MAP = { + 'STRING': STRING, + 'NUMBER': NUMBER, + 'BINARY': BINARY +} + + +def create_or_update_dynamo_table(connection, module): + table_name = module.params.get('name') + hash_key_name = module.params.get('hash_key_name') + hash_key_type = module.params.get('hash_key_type') + range_key_name = module.params.get('range_key_name') + range_key_type = module.params.get('range_key_type') + read_capacity = module.params.get('read_capacity') + write_capacity = module.params.get('write_capacity') + + schema = [ + HashKey(hash_key_name, map_dynamo_type(hash_key_type)), + RangeKey(range_key_name, map_dynamo_type(range_key_type)) + ] + throughput = { + 'read': read_capacity, + 'write': write_capacity + } + + result = dict( + region=module.params.get('region'), + table_name=table_name, + hash_key_name=hash_key_name, + hash_key_type=hash_key_type, + range_key_name=range_key_name, + range_key_type=range_key_type, + read_capacity=read_capacity, + write_capacity=write_capacity, + ) + + try: + table = Table(table_name, connection=connection) + + if dynamo_table_exists(table): + changed = update_dynamo_table(table, throughput=throughput) + else: + Table.create(table_name, connection=connection, schema=schema, throughput=throughput) + changed = True + + result['table_status'] = table.describe()['Table']['TableStatus'] + result['changed'] = changed + + except BotoServerError: + result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc() + module.fail_json(**result) + else: + module.exit_json(**result) + + +def delete_dynamo_table(connection, module): + table_name = module.params.get('table_name') + + result = dict( + region=module.params.get('region'), + table_name=table_name, + ) + + try: + changed = False + table = Table(table_name, connection=connection) + + if dynamo_table_exists(table): + table.delete() + changed = True + + result['changed'] = changed + + except BotoServerError: + result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc() + module.fail_json(**result) + else: + module.exit_json(**result) + + +def dynamo_table_exists(table): + try: + table.describe() + return True + + except JSONResponseError, e: + if e.message and e.message.startswith('Requested resource not found'): + return False + else: + raise e + + +def update_dynamo_table(table, throughput=None): + table.describe() # populate table details + + # AWS complains if the throughput hasn't changed + if has_throughput_changed(table, throughput): + return table.update(throughput=throughput) + + return False 
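# A minimal standalone sketch of the update path implemented by
# update_dynamo_table() above and has_throughput_changed() below: describe the
# table, compare the provisioned throughput, and only call update() when it
# differs. The table name, region and capacity values are placeholders, not
# part of the patch.
import boto.dynamodb2
from boto.dynamodb2.table import Table

conn = boto.dynamodb2.connect_to_region('us-east-1')
table = Table('my-table', connection=conn)
table.describe()  # populates table.throughput with the current capacity

desired = {'read': 10, 'write': 10}
if (desired['read'] != table.throughput['read'] or
        desired['write'] != table.throughput['write']):
    table.update(throughput=desired)  # AWS complains if the throughput hasn't changed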
+ + +def has_throughput_changed(table, new_throughput): + if not new_throughput: + return False + + return new_throughput['read'] != table.throughput['read'] or \ + new_throughput['write'] != table.throughput['write'] + + +def map_dynamo_type(dynamo_type): + return DYNAMO_TYPE_MAP.get(dynamo_type) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['present', 'absent']), + name=dict(required=True, type='str'), + hash_key_name=dict(required=True, type='str'), + hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), + range_key_name=dict(type='str'), + range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), + read_capacity=dict(default=1, type='int'), + write_capacity=dict(default=1, type='int'), + )) + + module = AnsibleModule(argument_spec=argument_spec) + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + connection = boto.dynamodb2.connect_to_region(region) + + state = module.params.get('state') + if state == 'present': + create_or_update_dynamo_table(connection, module) + elif state == 'absent': + delete_dynamo_table(connection, module) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() From d1c896d31edf7bea35c02bd641555626b4caa79b Mon Sep 17 00:00:00 2001 From: Peter Mounce Date: Fri, 1 May 2015 21:17:34 +0100 Subject: [PATCH 024/157] win_scheduled_task module for windows Fledgling module to allow scheduled tasks to be managed. At present, I only need enabled/disabled support. There's lots of scope for more features. --- windows/win_scheduled_task.ps1 | 77 ++++++++++++++++++++++++++++++++++ windows/win_scheduled_task.py | 54 ++++++++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 windows/win_scheduled_task.ps1 create mode 100644 windows/win_scheduled_task.py diff --git a/windows/win_scheduled_task.ps1 b/windows/win_scheduled_task.ps1 new file mode 100644 index 00000000000..2716ed32ea9 --- /dev/null +++ b/windows/win_scheduled_task.ps1 @@ -0,0 +1,77 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2015, Peter Mounce +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +$ErrorActionPreference = "Stop" + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; +$result = New-Object PSObject; +Set-Attr $result "changed" $false; + +if ($params.name) +{ + $package = $params.name +} +else +{ + Fail-Json $result "missing required argument: name" +} +if ($params.state) +{ + $state = $params.state.ToString() + if (($state -ne 'Enabled') -and ($state -ne 'Disabled')) + { + Fail-Json $result "state is '$state'; must be 'Enabled' or 'Disabled'" + } +} +else +{ + $state = "Enabled" +} + + +try +{ + $tasks = Get-ScheduledTask -TaskPath $name + $tasks_needing_changing |? 
{ $_.State -ne $state } + if ($tasks_needing_changing -eq $null) + { + if ($state -eq 'Disabled') + { + $tasks_needing_changing | Disable-ScheduledTask + } + elseif ($state -eq 'Enabled') + { + $tasks_needing_changing | Enable-ScheduledTask + } + Set-Attr $result "tasks_changed" ($tasks_needing_changing | foreach { $_.TaskPath + $_.TaskName }) + $result.changed = $true + } + else + { + Set-Attr $result "tasks_changed" @() + $result.changed = $false + } + Exit-Json $result; +} +catch +{ + Fail-Json $result $_.Exception.Message +} diff --git a/windows/win_scheduled_task.py b/windows/win_scheduled_task.py new file mode 100644 index 00000000000..ac353c14c0a --- /dev/null +++ b/windows/win_scheduled_task.py @@ -0,0 +1,54 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Peter Mounce +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +DOCUMENTATION = ''' +--- +module: win_scheduled_task +version_added: "1.9" +short_description: Manage scheduled tasks +description: + - Manage scheduled tasks +options: + name: + description: + - Name of the scheduled task + - Supports * as wildcard + required: true + default: null + aliases: [] + state: + description: + - State that the task should become + required: false + choices: + - Disabled + - Enabled + default: Enabled + aliases: [] +author: Peter Mounce +''' + +EXAMPLES = ''' + # Disable the scheduled tasks with "WindowsUpdate" in their name + win_scheduled_task: name="*WindowsUpdate*" state=disabled +''' From 6f1d9fbbccea3c37f3ab672a544903297da311a5 Mon Sep 17 00:00:00 2001 From: Peter Mounce Date: Sat, 2 May 2015 13:56:01 +0100 Subject: [PATCH 025/157] correct variable name --- windows/win_scheduled_task.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_scheduled_task.ps1 b/windows/win_scheduled_task.ps1 index 2716ed32ea9..763bfb53862 100644 --- a/windows/win_scheduled_task.ps1 +++ b/windows/win_scheduled_task.ps1 @@ -27,7 +27,7 @@ Set-Attr $result "changed" $false; if ($params.name) { - $package = $params.name + $name = $params.name } else { From 4fef779f09b0d3b8d7fd7fa893d54c4fc09f2475 Mon Sep 17 00:00:00 2001 From: Peter Mounce Date: Sat, 2 May 2015 17:24:30 +0100 Subject: [PATCH 026/157] caught out by syntax --- windows/win_scheduled_task.ps1 | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/windows/win_scheduled_task.ps1 b/windows/win_scheduled_task.ps1 index 763bfb53862..52b68dd5b6a 100644 --- a/windows/win_scheduled_task.ps1 +++ b/windows/win_scheduled_task.ps1 @@ -50,8 +50,8 @@ else try { $tasks = Get-ScheduledTask -TaskPath $name - $tasks_needing_changing |? { $_.State -ne $state } - if ($tasks_needing_changing -eq $null) + $tasks_needing_changing = $tasks |? 
{ $_.State -ne $state } + if (-not($tasks_needing_changing -eq $null)) { if ($state -eq 'Disabled') { @@ -69,6 +69,7 @@ try Set-Attr $result "tasks_changed" @() $result.changed = $false } + Exit-Json $result; } catch From ede4820562423632610359c07623a158acf0282f Mon Sep 17 00:00:00 2001 From: Peter Mounce Date: Wed, 6 May 2015 21:47:39 +0100 Subject: [PATCH 027/157] version_added -> 2, remove empty aliases --- windows/win_scheduled_task.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/windows/win_scheduled_task.py b/windows/win_scheduled_task.py index ac353c14c0a..e755890b319 100644 --- a/windows/win_scheduled_task.py +++ b/windows/win_scheduled_task.py @@ -24,7 +24,7 @@ DOCUMENTATION = ''' --- module: win_scheduled_task -version_added: "1.9" +version_added: "2.0" short_description: Manage scheduled tasks description: - Manage scheduled tasks @@ -35,7 +35,6 @@ options: - Supports * as wildcard required: true default: null - aliases: [] state: description: - State that the task should become @@ -44,7 +43,6 @@ options: - Disabled - Enabled default: Enabled - aliases: [] author: Peter Mounce ''' From d9211b709b2f6a8bb46118ec3ae95907551c158f Mon Sep 17 00:00:00 2001 From: Peter Mounce Date: Wed, 6 May 2015 21:48:19 +0100 Subject: [PATCH 028/157] no default, remove it --- windows/win_scheduled_task.py | 1 - 1 file changed, 1 deletion(-) diff --git a/windows/win_scheduled_task.py b/windows/win_scheduled_task.py index e755890b319..7c604ecec20 100644 --- a/windows/win_scheduled_task.py +++ b/windows/win_scheduled_task.py @@ -34,7 +34,6 @@ options: - Name of the scheduled task - Supports * as wildcard required: true - default: null state: description: - State that the task should become From a4a3a1343953cf996b57bb6b91a55cdb6678ca12 Mon Sep 17 00:00:00 2001 From: Peter Mounce Date: Tue, 19 May 2015 11:21:23 +0100 Subject: [PATCH 029/157] Code-review Swap state enabled/disabled -> enabled yes/no --- windows/win_scheduled_task.ps1 | 22 +++++++++------------- windows/win_scheduled_task.py | 10 +++++----- 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/windows/win_scheduled_task.ps1 b/windows/win_scheduled_task.ps1 index 52b68dd5b6a..2f802f59cd0 100644 --- a/windows/win_scheduled_task.ps1 +++ b/windows/win_scheduled_task.ps1 @@ -33,33 +33,29 @@ else { Fail-Json $result "missing required argument: name" } -if ($params.state) +if ($params.enabled) { - $state = $params.state.ToString() - if (($state -ne 'Enabled') -and ($state -ne 'Disabled')) - { - Fail-Json $result "state is '$state'; must be 'Enabled' or 'Disabled'" - } + $enabled = $params.enabled | ConvertTo-Bool } else { - $state = "Enabled" + $enabled = $true } - +$target_state = @{$true = "Enabled"; $false="Disabled"}[$enabled] try { $tasks = Get-ScheduledTask -TaskPath $name - $tasks_needing_changing = $tasks |? { $_.State -ne $state } + $tasks_needing_changing = $tasks |? 
{ $_.State -ne $target_state } if (-not($tasks_needing_changing -eq $null)) { - if ($state -eq 'Disabled') + if ($enabled) { - $tasks_needing_changing | Disable-ScheduledTask + $tasks_needing_changing | Enable-ScheduledTask } - elseif ($state -eq 'Enabled') + else { - $tasks_needing_changing | Enable-ScheduledTask + $tasks_needing_changing | Disable-ScheduledTask } Set-Attr $result "tasks_changed" ($tasks_needing_changing | foreach { $_.TaskPath + $_.TaskName }) $result.changed = $true diff --git a/windows/win_scheduled_task.py b/windows/win_scheduled_task.py index 7c604ecec20..2c5867402c5 100644 --- a/windows/win_scheduled_task.py +++ b/windows/win_scheduled_task.py @@ -34,18 +34,18 @@ options: - Name of the scheduled task - Supports * as wildcard required: true - state: + enabled: description: - State that the task should become required: false choices: - - Disabled - - Enabled - default: Enabled + - yes + - no + default: yes author: Peter Mounce ''' EXAMPLES = ''' # Disable the scheduled tasks with "WindowsUpdate" in their name - win_scheduled_task: name="*WindowsUpdate*" state=disabled + win_scheduled_task: name="*WindowsUpdate*" enabled=no ''' From 172b012ee2982f15025b1022ffd7b0ef442893d9 Mon Sep 17 00:00:00 2001 From: Rick Mendes Date: Wed, 3 Jun 2015 08:46:29 -0700 Subject: [PATCH 030/157] now handles keys protected with a passphrase --- cloud/amazon/ec2_win_password.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_win_password.py b/cloud/amazon/ec2_win_password.py index 33a6ae7f947..c873bb9ecb0 100644 --- a/cloud/amazon/ec2_win_password.py +++ b/cloud/amazon/ec2_win_password.py @@ -17,6 +17,10 @@ options: description: - path to the file containing the key pair used on the instance required: true + key_passphrase: + description: + - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3. + required: false region: description: - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. @@ -36,6 +40,16 @@ tasks: instance_id: i-XXXXXX region: us-east-1 key_file: "~/aws-creds/my_test_key.pem" + +# Example of getting a password with a password protected key +tasks: +- name: get the Administrator password + ec2_win_password: + profile: my-boto-profile + instance_id: i-XXXXXX + region: us-east-1 + key_file: "~/aws-creds/my_protected_test_key.pem" + key_passphrase: "secret" ''' from base64 import b64decode @@ -54,6 +68,7 @@ def main(): argument_spec.update(dict( instance_id = dict(required=True), key_file = dict(required=True), + key_passphrase = dict(default=None), ) ) module = AnsibleModule(argument_spec=argument_spec) @@ -63,6 +78,7 @@ def main(): instance_id = module.params.get('instance_id') key_file = expanduser(module.params.get('key_file')) + key_passphrase = module.params.get('key_passphrase') ec2 = ec2_connect(module) @@ -70,7 +86,7 @@ def main(): decoded = b64decode(data) f = open(key_file, 'r') - key = RSA.importKey(f.read()) + key = RSA.importKey(f.read(), key_passphrase) cipher = PKCS1_v1_5.new(key) sentinel = 'password decryption failed!!!' 
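The passphrase support added above can be exercised outside Ansible with a short boto/PyCrypto sketch that mirrors the module's own decryption flow; the instance id, region, key path and passphrase below are placeholders taken from the module's examples.

from base64 import b64decode
import boto.ec2
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA

conn = boto.ec2.connect_to_region('us-east-1')
data = conn.get_password_data('i-XXXXXX')  # base64-encoded blob, empty until Windows has generated the password
key = RSA.importKey(open('my_protected_test_key.pem').read(), 'secret')  # passphrase-protected private key
cipher = PKCS1_v1_5.new(key)
password = cipher.decrypt(b64decode(data), None)  # returns the sentinel (None) if decryption fails
print password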
From 80153611abda732446f8412ed21fd04a93aa96fb Mon Sep 17 00:00:00 2001 From: Rick Mendes Date: Wed, 3 Jun 2015 09:06:43 -0700 Subject: [PATCH 031/157] added version_added to key_passphrase --- cloud/amazon/ec2_win_password.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/ec2_win_password.py b/cloud/amazon/ec2_win_password.py index c873bb9ecb0..6a81192016a 100644 --- a/cloud/amazon/ec2_win_password.py +++ b/cloud/amazon/ec2_win_password.py @@ -18,6 +18,7 @@ options: - path to the file containing the key pair used on the instance required: true key_passphrase: + version_added: "2.0" description: - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3. required: false From 70a7a46d52751f9bed4cb4fa8de08ae9ac9f57ad Mon Sep 17 00:00:00 2001 From: Rick Mendes Date: Tue, 9 Jun 2015 08:55:58 -0700 Subject: [PATCH 032/157] optionally supports waiting for the password to be available --- cloud/amazon/ec2_win_password.py | 59 ++++++++++++++++++++++++++++---- 1 file changed, 53 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/ec2_win_password.py b/cloud/amazon/ec2_win_password.py index 6a81192016a..05aa67e3d29 100644 --- a/cloud/amazon/ec2_win_password.py +++ b/cloud/amazon/ec2_win_password.py @@ -15,7 +15,7 @@ options: required: true key_file: description: - - path to the file containing the key pair used on the instance + - Path to the file containing the key pair used on the instance. required: true key_passphrase: version_added: "2.0" @@ -28,6 +28,18 @@ options: required: false default: null aliases: [ 'aws_region', 'ec2_region' ] + wait: + version_added: "2.0" + description: + - Whether or not to wait for the password to be available before returning. + required: false + default: "no" + choices: [ "yes", "no" ] + wait_timeout: + version_added: "2.0" + description: + - Number of seconds to wait before giving up. 
+ default: 120 extends_documentation_fragment: aws ''' @@ -51,12 +63,24 @@ tasks: region: us-east-1 key_file: "~/aws-creds/my_protected_test_key.pem" key_passphrase: "secret" + +# Example of waiting for a password +tasks: +- name: get the Administrator password + ec2_win_password: + profile: my-boto-profile + instance_id: i-XXXXXX + region: us-east-1 + key_file: "~/aws-creds/my_test_key.pem" + wait: yes + wait_timeout: 45 ''' from base64 import b64decode from os.path import expanduser from Crypto.Cipher import PKCS1_v1_5 from Crypto.PublicKey import RSA +import datetime try: import boto.ec2 @@ -70,6 +94,8 @@ def main(): instance_id = dict(required=True), key_file = dict(required=True), key_passphrase = dict(default=None), + wait = dict(type='bool', default=False), + wait_timeout = dict(default=120), ) ) module = AnsibleModule(argument_spec=argument_spec) @@ -80,11 +106,28 @@ def main(): instance_id = module.params.get('instance_id') key_file = expanduser(module.params.get('key_file')) key_passphrase = module.params.get('key_passphrase') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) ec2 = ec2_connect(module) - data = ec2.get_password_data(instance_id) - decoded = b64decode(data) + if wait: + start = datetime.datetime.now() + end = start + datetime.timedelta(seconds=wait_timeout) + + while datetime.datetime.now() < end: + data = ec2.get_password_data(instance_id) + decoded = b64decode(data) + if wait and not decoded: + time.sleep(5) + else: + break + else: + data = ec2.get_password_data(instance_id) + decoded = b64decode(data) + + if wait and datetime.datetime.now() >= end: + module.fail_json(msg = "wait for password timeout after %d seconds" % wait_timeout) f = open(key_file, 'r') key = RSA.importKey(f.read(), key_passphrase) @@ -92,14 +135,18 @@ def main(): sentinel = 'password decryption failed!!!' try: - decrypted = cipher.decrypt(decoded, sentinel) + decrypted = cipher.decrypt(decoded, sentinel) except ValueError as e: - decrypted = None + decrypted = None if decrypted == None: module.exit_json(win_password='', changed=False) else: - module.exit_json(win_password=decrypted, changed=True) + if wait: + elapsed = datetime.datetime.now() - start + module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds) + else: + module.exit_json(win_password=decrypted, changed=True) # import module snippets from ansible.module_utils.basic import * From 6a89b92cdaba2f98196bcdafefb9dbcee503e650 Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Fri, 12 Jun 2015 18:36:23 +0000 Subject: [PATCH 033/157] Fixed results & 'Changed'. Added 'deleted' action. Added ability to specify multiple services. --- monitoring/pagerduty.py | 72 +++++++++++++++++++++++++++++++++-------- 1 file changed, 58 insertions(+), 14 deletions(-) diff --git a/monitoring/pagerduty.py b/monitoring/pagerduty.py index 24c622c83a8..2ed7c0cc6bb 100644 --- a/monitoring/pagerduty.py +++ b/monitoring/pagerduty.py @@ -11,6 +11,7 @@ author: - "Andrew Newdigate (@suprememoocow)" - "Dylan Silva (@thaumos)" - "Justin Johns" + - "Bruce Pennypacker" requirements: - PagerDuty API access options: @@ -19,7 +20,7 @@ options: - Create a maintenance window or get a list of ongoing windows. required: true default: null - choices: [ "running", "started", "ongoing" ] + choices: [ "running", "started", "ongoing", "deleted" ] aliases: [] name: description: @@ -61,11 +62,11 @@ options: version_added: '1.8' service: description: - - PagerDuty service ID. 
+ - A comma separated list of PagerDuty service IDs. required: false default: null choices: [] - aliases: [] + aliases: [ services ] hours: description: - Length of maintenance window in hours. @@ -96,9 +97,6 @@ options: default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 - -notes: - - This module does not yet have support to end maintenance windows. ''' EXAMPLES=''' @@ -132,6 +130,14 @@ EXAMPLES=''' service=FOO123 hours=4 desc=deployment + register: pd_window + +# Delete the previous maintenance window +- pagerduty: name=companyabc + user=example@example.com + passwd=password123 + state=deleted + service={{ pd_window.result.maintenance_window.id }} ''' import datetime @@ -152,7 +158,7 @@ def ongoing(module, name, user, passwd, token): if info['status'] != 200: module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg']) - return False, response.read() + return False, response.read(), False def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc): @@ -166,7 +172,8 @@ def create(module, name, user, passwd, token, requester_id, service, hours, minu 'Authorization': auth_header(user, passwd, token), 'Content-Type' : 'application/json', } - request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': [service]}} + request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': service}} + if requester_id: request_data['requester_id'] = requester_id else: @@ -178,19 +185,50 @@ def create(module, name, user, passwd, token, requester_id, service, hours, minu if info['status'] != 200: module.fail_json(msg="failed to create the window: %s" % info['msg']) - return False, response.read() + try: + json_out = json.loads(response.read()) + except: + json_out = "" + + return False, json_out, True + +def delete(module, name, user, passwd, token, requester_id, service): + url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/" + service[0] + headers = { + 'Authorization': auth_header(user, passwd, token), + 'Content-Type' : 'application/json', + } + request_data = {} + + if requester_id: + request_data['requester_id'] = requester_id + else: + if token: + module.fail_json(msg="requester_id is required when using a token") + + data = json.dumps(request_data) + response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE') + if info['status'] != 200: + module.fail_json(msg="failed to delete the window: %s" % info['msg']) + + try: + json_out = json.loads(response.read()) + except: + json_out = "" + + return False, json_out, True def main(): module = AnsibleModule( argument_spec=dict( - state=dict(required=True, choices=['running', 'started', 'ongoing']), + state=dict(required=True, choices=['running', 'started', 'ongoing', 'deleted']), name=dict(required=True), user=dict(required=False), passwd=dict(required=False), token=dict(required=False), - service=dict(required=False), + service=dict(required=False, type='list', aliases=["services"]), requester_id=dict(required=False), hours=dict(default='1', required=False), minutes=dict(default='0', required=False), @@ -217,15 +255,21 @@ def main(): if state == "running" or state == "started": if not service: module.fail_json(msg="service not specified") - (rc, out) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc) + (rc, out, changed) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc) + if rc == 0: + 
changed=True if state == "ongoing": - (rc, out) = ongoing(module, name, user, passwd, token) + (rc, out, changed) = ongoing(module, name, user, passwd, token) + + if state == "deleted": + (rc, out, changed) = delete(module, name, user, passwd, token, requester_id, service) if rc != 0: module.fail_json(msg="failed", result=out) - module.exit_json(msg="success", result=out) + + module.exit_json(msg="success", result=out, changed=changed) # import module snippets from ansible.module_utils.basic import * From 51bba578b6fa6103400c45d9b7015299797aef4d Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Fri, 12 Jun 2015 19:58:57 +0000 Subject: [PATCH 034/157] Updated 'ongoing' to also return properly formatted json --- monitoring/pagerduty.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/monitoring/pagerduty.py b/monitoring/pagerduty.py index 2ed7c0cc6bb..bd35fbb6003 100644 --- a/monitoring/pagerduty.py +++ b/monitoring/pagerduty.py @@ -158,7 +158,12 @@ def ongoing(module, name, user, passwd, token): if info['status'] != 200: module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg']) - return False, response.read(), False + try: + json_out = json.loads(response.read()) + except: + json_out = "" + + return False, json_out, False def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc): From 9f0ee40b42f491421e582066c8b82ea95d0cf769 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Thu, 13 Nov 2014 18:57:00 -0500 Subject: [PATCH 035/157] Add ec2_vpc_igw module. --- cloud/amazon/ec2_vpc_igw.py | 189 ++++++++++++++++++++++++++++++++++++ 1 file changed, 189 insertions(+) create mode 100644 cloud/amazon/ec2_vpc_igw.py diff --git a/cloud/amazon/ec2_vpc_igw.py b/cloud/amazon/ec2_vpc_igw.py new file mode 100644 index 00000000000..1c5bf9dea1c --- /dev/null +++ b/cloud/amazon/ec2_vpc_igw.py @@ -0,0 +1,189 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_vpc_igw +short_description: configure AWS virtual private clouds +description: + - Create or terminates AWS internat gateway in a virtual private cloud. ''' +'''This module has a dependency on python-boto. +version_added: "1.8" +options: + vpc_id: + description: + - "The VPC ID for which to create or remove the Internet Gateway." + required: true + state: + description: + - Create or terminate the IGW + required: true + default: present + aliases: [] + region: + description: + - region in which the resource exists. + required: false + default: null + aliases: ['aws_region', 'ec2_region'] + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY''' +''' environment variable is used. + required: false + default: None + aliases: ['ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY''' +''' environment variable is used. 
+ required: false + default: None + aliases: ['ec2_access_key', 'access_key' ] + validate_certs: + description: + - When set to "no", SSL certificates will not be validated for boto''' +''' versions >= 2.6.0. + required: false + default: "yes" + choices: ["yes", "no"] + aliases: [] + version_added: "1.5" + +requirements: [ "boto" ] +author: Robert Estelle +''' + +EXAMPLES = ''' +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. + +# Ensure that the VPC has an Internet Gateway. +# The Internet Gateway ID is can be accessed via {{igw.gateway_id}} for use +# in setting up NATs etc. + local_action: + module: ec2_vpc_igw + vpc_id: {{vpc.vpc_id}} + region: {{vpc.vpc.region}} + state: present + register: igw +''' + + +import sys + +try: + import boto.ec2 + import boto.vpc + from boto.exception import EC2ResponseError +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + + +class IGWExcepton(Exception): + pass + + +def ensure_igw_absent(vpc_conn, vpc_id, check_mode): + igws = vpc_conn.get_all_internet_gateways( + filters={'attachment.vpc-id': vpc_id}) + + if not igws: + return {'changed': False} + + if check_mode: + return {'changed': True} + + for igw in igws: + try: + vpc_conn.detach_internet_gateway(igw.id, vpc_id) + vpc_conn.delete_internet_gateway(igw.id) + except EC2ResponseError as e: + raise IGWExcepton('Unable to delete Internet Gateway, error: {0}' + .format(e)) + + return {'changed': True} + + +def ensure_igw_present(vpc_conn, vpc_id, check_mode): + igws = vpc_conn.get_all_internet_gateways( + filters={'attachment.vpc-id': vpc_id}) + + if len(igws) > 1: + raise IGWExcepton( + 'EC2 returned more than one Internet Gateway for VPC {0}, aborting' + .format(vpc_id)) + + if igws: + return {'changed': False, 'gateway_id': igws[0].id} + else: + if check_mode: + return {'changed': True, 'gateway_id': None} + + try: + igw = vpc_conn.create_internet_gateway() + vpc_conn.attach_internet_gateway(igw.id, vpc_id) + return {'changed': True, 'gateway_id': igw.id} + except EC2ResponseError as e: + raise IGWExcepton('Unable to create Internet Gateway, error: {0}' + .format(e)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update({ + 'vpc_id': {'required': True}, + 'state': {'choices': ['present', 'absent'], 'default': 'present'}, + }) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + if not region: + module.fail_json(msg='Region must be specified') + + try: + vpc_conn = boto.vpc.connect_to_region( + region, + aws_access_key_id=aws_access_key, + aws_secret_access_key=aws_secret_key + ) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg=str(e)) + + vpc_id = module.params.get('vpc_id') + state = module.params.get('state', 'present') + + try: + if state == 'present': + result = ensure_igw_present(vpc_conn, vpc_id, + check_mode=module.check_mode) + elif state == 'absent': + result = ensure_igw_absent(vpc_conn, vpc_id, + check_mode=module.check_mode) + except IGWExcepton as e: + module.fail_json(msg=str(e)) + + module.exit_json(**result) + +from ansible.module_utils.basic import * # noqa +from ansible.module_utils.ec2 import * # noqa + +if __name__ == '__main__': + main() From 829759fba7f392e5998e5508faa2c30b85249ea2 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Mon, 1 Dec 2014 16:01:46 -0500 Subject: [PATCH 
036/157] ec2_vpc_igw - Exit with fail_json when boto is unavailable. --- cloud/amazon/ec2_vpc_igw.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2_vpc_igw.py b/cloud/amazon/ec2_vpc_igw.py index 1c5bf9dea1c..7276157bd56 100644 --- a/cloud/amazon/ec2_vpc_igw.py +++ b/cloud/amazon/ec2_vpc_igw.py @@ -83,15 +83,17 @@ EXAMPLES = ''' ''' -import sys +import sys # noqa try: import boto.ec2 import boto.vpc from boto.exception import EC2ResponseError + HAS_BOTO = True except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) + HAS_BOTO = False + if __name__ != '__main__': + raise class IGWExcepton(Exception): @@ -153,6 +155,8 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, ) + if not HAS_BOTO: + module.fail_json(msg='boto is required for this module') ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) if not region: From 6b32b95252c582a1687d98e435f5e33726c8a59d Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Mon, 1 Dec 2014 16:02:09 -0500 Subject: [PATCH 037/157] ec2_vpc_igw - Rename IGWException to AnsibleIGWException. --- cloud/amazon/ec2_vpc_igw.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/cloud/amazon/ec2_vpc_igw.py b/cloud/amazon/ec2_vpc_igw.py index 7276157bd56..cbac94528d2 100644 --- a/cloud/amazon/ec2_vpc_igw.py +++ b/cloud/amazon/ec2_vpc_igw.py @@ -96,7 +96,7 @@ except ImportError: raise -class IGWExcepton(Exception): +class AnsibleIGWException(Exception): pass @@ -115,8 +115,8 @@ def ensure_igw_absent(vpc_conn, vpc_id, check_mode): vpc_conn.detach_internet_gateway(igw.id, vpc_id) vpc_conn.delete_internet_gateway(igw.id) except EC2ResponseError as e: - raise IGWExcepton('Unable to delete Internet Gateway, error: {0}' - .format(e)) + raise AnsibleIGWException( + 'Unable to delete Internet Gateway, error: {0}'.format(e)) return {'changed': True} @@ -126,7 +126,7 @@ def ensure_igw_present(vpc_conn, vpc_id, check_mode): filters={'attachment.vpc-id': vpc_id}) if len(igws) > 1: - raise IGWExcepton( + raise AnsibleIGWException( 'EC2 returned more than one Internet Gateway for VPC {0}, aborting' .format(vpc_id)) @@ -141,8 +141,8 @@ def ensure_igw_present(vpc_conn, vpc_id, check_mode): vpc_conn.attach_internet_gateway(igw.id, vpc_id) return {'changed': True, 'gateway_id': igw.id} except EC2ResponseError as e: - raise IGWExcepton('Unable to create Internet Gateway, error: {0}' - .format(e)) + raise AnsibleIGWException( + 'Unable to create Internet Gateway, error: {0}'.format(e)) def main(): @@ -181,7 +181,7 @@ def main(): elif state == 'absent': result = ensure_igw_absent(vpc_conn, vpc_id, check_mode=module.check_mode) - except IGWExcepton as e: + except AnsibleIGWException as e: module.fail_json(msg=str(e)) module.exit_json(**result) From c21eebdd7b40f76a5e9d6d60102773e597f096a6 Mon Sep 17 00:00:00 2001 From: Rob White Date: Sun, 14 Jun 2015 16:31:31 +1000 Subject: [PATCH 038/157] Updated documentation and added boto profile support. 
--- cloud/amazon/ec2_vpc_igw.py | 94 ++++++++++++------------------------- 1 file changed, 30 insertions(+), 64 deletions(-) diff --git a/cloud/amazon/ec2_vpc_igw.py b/cloud/amazon/ec2_vpc_igw.py index cbac94528d2..63be48248ef 100644 --- a/cloud/amazon/ec2_vpc_igw.py +++ b/cloud/amazon/ec2_vpc_igw.py @@ -1,75 +1,42 @@ #!/usr/bin/python -# This file is part of Ansible # -# Ansible is free software: you can redistribute it and/or modify +# This is a free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # -# Ansible is distributed in the hope that it will be useful, +# This Ansible library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# along with this library. If not, see . DOCUMENTATION = ''' --- module: ec2_vpc_igw -short_description: configure AWS virtual private clouds +short_description: Manage an AWS VPC Internet gateway description: - - Create or terminates AWS internat gateway in a virtual private cloud. ''' -'''This module has a dependency on python-boto. -version_added: "1.8" + - Manage an AWS VPC Internet gateway +version_added: "2.0" +author: Robert Estelle, @erydo options: vpc_id: description: - - "The VPC ID for which to create or remove the Internet Gateway." + - The VPC ID for the VPC in which to manage the Internet Gateway. required: true + default: null state: description: - Create or terminate the IGW - required: true - default: present - aliases: [] - region: - description: - - region in which the resource exists. - required: false - default: null - aliases: ['aws_region', 'ec2_region'] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY''' -''' environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY''' -''' environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key' ] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto''' -''' versions >= 2.6.0. required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - -requirements: [ "boto" ] -author: Robert Estelle + default: present +extends_documentation_fragment: aws ''' EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. +# Note: These examples do not set authentication details, see the AWS Guide for details. # Ensure that the VPC has an Internet Gateway. 
# The Internet Gateway ID is can be accessed via {{igw.gateway_id}} for use @@ -147,40 +114,39 @@ def ensure_igw_present(vpc_conn, vpc_id, check_mode): def main(): argument_spec = ec2_argument_spec() - argument_spec.update({ - 'vpc_id': {'required': True}, - 'state': {'choices': ['present', 'absent'], 'default': 'present'}, - }) + argument_spec.update( + dict( + vpc_id = dict(required=True), + state = dict(choices=['present', 'absent'], default='present') + ) + ) + module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) + if not HAS_BOTO: module.fail_json(msg='boto is required for this module') - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - if not region: - module.fail_json(msg='Region must be specified') + region, ec2_url, aws_connect_params = get_aws_connection_info(module) - try: - vpc_conn = boto.vpc.connect_to_region( - region, - aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key - ) - except boto.exception.NoAuthHandlerFound as e: - module.fail_json(msg=str(e)) + if region: + try: + connection = connect_to_aws(boto.ec2, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") vpc_id = module.params.get('vpc_id') state = module.params.get('state', 'present') try: if state == 'present': - result = ensure_igw_present(vpc_conn, vpc_id, - check_mode=module.check_mode) + result = ensure_igw_present(connection, vpc_id, check_mode=module.check_mode) elif state == 'absent': - result = ensure_igw_absent(vpc_conn, vpc_id, - check_mode=module.check_mode) + result = ensure_igw_absent(connection, vpc_id, check_mode=module.check_mode) except AnsibleIGWException as e: module.fail_json(msg=str(e)) From 9285d0a1c75b21a412e70a0e432d1a125945e179 Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Sun, 14 Jun 2015 20:20:36 +0000 Subject: [PATCH 039/157] changed 'deleted' to 'absent' --- monitoring/pagerduty.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/monitoring/pagerduty.py b/monitoring/pagerduty.py index bd35fbb6003..b35cfbf4992 100644 --- a/monitoring/pagerduty.py +++ b/monitoring/pagerduty.py @@ -20,7 +20,7 @@ options: - Create a maintenance window or get a list of ongoing windows. 
required: true default: null - choices: [ "running", "started", "ongoing", "deleted" ] + choices: [ "running", "started", "ongoing", "absent" ] aliases: [] name: description: @@ -136,7 +136,7 @@ EXAMPLES=''' - pagerduty: name=companyabc user=example@example.com passwd=password123 - state=deleted + state=absent service={{ pd_window.result.maintenance_window.id }} ''' @@ -197,7 +197,7 @@ def create(module, name, user, passwd, token, requester_id, service, hours, minu return False, json_out, True -def delete(module, name, user, passwd, token, requester_id, service): +def absent(module, name, user, passwd, token, requester_id, service): url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/" + service[0] headers = { 'Authorization': auth_header(user, passwd, token), @@ -228,7 +228,7 @@ def main(): module = AnsibleModule( argument_spec=dict( - state=dict(required=True, choices=['running', 'started', 'ongoing', 'deleted']), + state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']), name=dict(required=True), user=dict(required=False), passwd=dict(required=False), @@ -267,8 +267,8 @@ def main(): if state == "ongoing": (rc, out, changed) = ongoing(module, name, user, passwd, token) - if state == "deleted": - (rc, out, changed) = delete(module, name, user, passwd, token, requester_id, service) + if state == "absent": + (rc, out, changed) = absent(module, name, user, passwd, token, requester_id, service) if rc != 0: module.fail_json(msg="failed", result=out) From 5c39a5cc197f7874595bb19bdd611a759a07518b Mon Sep 17 00:00:00 2001 From: whiter Date: Wed, 15 Apr 2015 17:45:41 +1000 Subject: [PATCH 040/157] New module - ec2_eni --- cloud/amazon/ec2_eni.py | 404 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 404 insertions(+) create mode 100644 cloud/amazon/ec2_eni.py diff --git a/cloud/amazon/ec2_eni.py b/cloud/amazon/ec2_eni.py new file mode 100644 index 00000000000..2b34e9b9405 --- /dev/null +++ b/cloud/amazon/ec2_eni.py @@ -0,0 +1,404 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_eni +short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance +description: + - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID is provided, an attempt is made to update the existing ENI. By passing 'None' as the instance_id, an ENI can be detached from an instance. +version_added: "2.0" +author: Rob White, wimnat [at] gmail.com, @wimnat +options: + eni_id: + description: + - The ID of the ENI + required = false + default = null + instance_id: + description: + - Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'. + required: false + default: null + private_ip_address: + description: + - Private IP address. + required: false + default: null + subnet_id: + description: + - ID of subnet in which to create the ENI. 
Only required when state=present. + required: true + description: + description: + - Optional description of the ENI. + required: false + default: null + security_groups: + description: + - List of security groups associated with the interface. Only used when state=present. + required: false + default: null + state: + description: + - Create or delete ENI. + required: false + default: present + choices: [ 'present', 'absent' ] + device_index: + description: + - The index of the device for the network interface attachment on the instance. + required: false + default: 0 + force_detach: + description: + - Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent. + required: false + default: no + delete_on_termination: + description: + - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation. + required: false + source_dest_check: + description: + - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation. + required: false +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Create an ENI. As no security group is defined, ENI will be created in default security group +- ec2_eni: + private_ip_address: 172.31.0.20 + subnet_id: subnet-xxxxxxxx + state: present + +# Create an ENI and attach it to an instance +- ec2_eni: + instance_id: i-xxxxxxx + device_index: 1 + private_ip_address: 172.31.0.20 + subnet_id: subnet-xxxxxxxx + state: present + +# Destroy an ENI, detaching it from any instance if necessary +- ec2_eni: + eni_id: eni-xxxxxxx + force_detach: yes + state: absent + +# Update an ENI +- ec2_eni: + eni_id: eni-xxxxxxx + description: "My new description" + state: present + +# Detach an ENI from an instance +- ec2_eni: + eni_id: eni-xxxxxxx + instance_id: None + state: present + +### Delete an interface on termination +# First create the interface +- ec2_eni: + instance_id: i-xxxxxxx + device_index: 1 + private_ip_address: 172.31.0.20 + subnet_id: subnet-xxxxxxxx + state: present + register: eni + +# Modify the interface to enable the delete_on_terminaton flag +- ec2_eni: + eni_id: {{ "eni.interface.id" }} + delete_on_termination: true + +''' + +import time +import xml.etree.ElementTree as ET +import re + +try: + import boto.ec2 + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +def get_error_message(xml_string): + + root = ET.fromstring(xml_string) + for message in root.findall('.//Message'): + return message.text + + +def get_eni_info(interface): + + interface_info = {'id': interface.id, + 'subnet_id': interface.subnet_id, + 'vpc_id': interface.vpc_id, + 'description': interface.description, + 'owner_id': interface.owner_id, + 'status': interface.status, + 'mac_address': interface.mac_address, + 'private_ip_address': interface.private_ip_address, + 'source_dest_check': interface.source_dest_check, + 'groups': dict((group.id, group.name) for group in interface.groups), + } + + if interface.attachment is not None: + interface_info['attachment'] = {'attachment_id': interface.attachment.id, + 'instance_id': interface.attachment.instance_id, + 'device_index': 
interface.attachment.device_index, + 'status': interface.attachment.status, + 'attach_time': interface.attachment.attach_time, + 'delete_on_termination': interface.attachment.delete_on_termination, + } + + return interface_info + +def wait_for_eni(eni, status): + + while True: + time.sleep(3) + eni.update() + # If the status is detached we just need attachment to disappear + if eni.attachment is None: + if status == "detached": + break + else: + if status == "attached" and eni.attachment.status == "attached": + break + + +def create_eni(connection, module): + + instance_id = module.params.get("instance_id") + if instance_id == 'None': + instance_id = None + do_detach = True + else: + do_detach = False + device_index = module.params.get("device_index") + subnet_id = module.params.get('subnet_id') + private_ip_address = module.params.get('private_ip_address') + description = module.params.get('description') + security_groups = module.params.get('security_groups') + changed = False + + try: + eni = compare_eni(connection, module) + if eni is None: + eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups) + if instance_id is not None: + try: + eni.attach(instance_id, device_index) + except BotoServerError as ex: + eni.delete() + raise + changed = True + # Wait to allow creation / attachment to finish + wait_for_eni(eni, "attached") + eni.update() + + except BotoServerError as e: + module.fail_json(msg=get_error_message(e.args[2])) + + module.exit_json(changed=changed, interface=get_eni_info(eni)) + + +def modify_eni(connection, module): + + eni_id = module.params.get("eni_id") + instance_id = module.params.get("instance_id") + if instance_id == 'None': + instance_id = None + do_detach = True + else: + do_detach = False + device_index = module.params.get("device_index") + subnet_id = module.params.get('subnet_id') + private_ip_address = module.params.get('private_ip_address') + description = module.params.get('description') + security_groups = module.params.get('security_groups') + force_detach = module.params.get("force_detach") + source_dest_check = module.params.get("source_dest_check") + delete_on_termination = module.params.get("delete_on_termination") + changed = False + + + try: + # Get the eni with the eni_id specified + eni_result_set = connection.get_all_network_interfaces(eni_id) + eni = eni_result_set[0] + if description is not None: + if eni.description != description: + connection.modify_network_interface_attribute(eni.id, "description", description) + changed = True + if security_groups is not None: + if sorted(get_sec_group_list(eni.groups)) != sorted(security_groups): + connection.modify_network_interface_attribute(eni.id, "groupSet", security_groups) + changed = True + if source_dest_check is not None: + if eni.source_dest_check != source_dest_check: + connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check) + changed = True + if delete_on_termination is not None: + if eni.attachment is not None: + if eni.attachment.delete_on_termination is not delete_on_termination: + connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id) + changed = True + else: + module.fail_json(msg="Can not modify delete_on_termination as the interface is not attached") + if eni.attachment is not None and instance_id is None and do_detach is True: + eni.detach(force_detach) + wait_for_eni(eni, "detached") + changed = True + else: + if instance_id is not None: + 
eni.attach(instance_id, device_index) + wait_for_eni(eni, "attached") + changed = True + + except BotoServerError as e: + print e + module.fail_json(msg=get_error_message(e.args[2])) + + eni.update() + module.exit_json(changed=changed, interface=get_eni_info(eni)) + + +def delete_eni(connection, module): + + eni_id = module.params.get("eni_id") + force_detach = module.params.get("force_detach") + + try: + eni_result_set = connection.get_all_network_interfaces(eni_id) + eni = eni_result_set[0] + + if force_detach is True: + if eni.attachment is not None: + eni.detach(force_detach) + # Wait to allow detachment to finish + wait_for_eni(eni, "detached") + eni.update() + eni.delete() + changed = True + else: + eni.delete() + changed = True + + module.exit_json(changed=changed) + except BotoServerError as e: + msg = get_error_message(e.args[2]) + regex = re.compile('The networkInterface ID \'.*\' does not exist') + if regex.search(msg) is not None: + module.exit_json(changed=False) + else: + module.fail_json(msg=get_error_message(e.args[2])) + +def compare_eni(connection, module): + + eni_id = module.params.get("eni_id") + subnet_id = module.params.get('subnet_id') + private_ip_address = module.params.get('private_ip_address') + description = module.params.get('description') + security_groups = module.params.get('security_groups') + + try: + all_eni = connection.get_all_network_interfaces(eni_id) + + for eni in all_eni: + remote_security_groups = get_sec_group_list(eni.groups) + if (eni.subnet_id == subnet_id) and (eni.private_ip_address == private_ip_address) and (eni.description == description) and (remote_security_groups == security_groups): + return eni + + except BotoServerError as e: + module.fail_json(msg=get_error_message(e.args[2])) + + return None + +def get_sec_group_list(groups): + + # Build list of remote security groups + remote_security_groups = [] + for group in groups: + remote_security_groups.append(group.id.encode()) + + return remote_security_groups + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + eni_id = dict(default=None), + instance_id = dict(default=None), + private_ip_address = dict(), + subnet_id = dict(), + description = dict(), + security_groups = dict(type='list'), + device_index = dict(default=0, type='int'), + state = dict(default='present', choices=['present', 'absent']), + force_detach = dict(default='no', type='bool'), + source_dest_check = dict(default=None, type='bool'), + delete_on_termination = dict(default=None, type='bool') + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.ec2, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + state = module.params.get("state") + eni_id = module.params.get("eni_id") + + if state == 'present': + if eni_id is None: + if module.params.get("subnet_id") is None: + module.fail_json(msg="subnet_id must be specified when state=present") + create_eni(connection, module) + else: + modify_eni(connection, module) + elif state == 'absent': + if eni_id is None: + module.fail_json(msg="eni_id must be specified") + else: + delete_eni(connection, module) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +# this 
is magic, see lib/ansible/module_common.py +#<> + +main() From 1b0676b559eb0dafb6dba6fe0502903821e0a701 Mon Sep 17 00:00:00 2001 From: "Dustin C. Hatch" Date: Wed, 17 Jun 2015 16:12:58 -0500 Subject: [PATCH 041/157] packaging/os/portage: Improve check mode handling When running in check mode, the *portage* module always reports that no changes were made, even if the requested packages do not exist on the system. This is because it was erroneously expecting `emerge --pretend` to produce the same output as `emerge` by itself would, and attempts to parse it. This is not correct, for several reasons. Most specifically, the string for which it is searching does not exist in the pretend output. Additionally, `emerge --pretend` always prints the requested packages, whether they are already installed or not; in the former case, it shows them as reinstalls. This commit adjusts the behavior to rely on `equery` alone when running in check mode. If `equery` reports at least one package is not installed, then nothing else is done: the system will definitely be changed. Signed-off-by: Dustin C. Hatch --- packaging/os/portage.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index 2ce0379a8ec..712881a91ea 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -254,6 +254,8 @@ def emerge_packages(module, packages): break else: module.exit_json(changed=False, msg='Packages already present.') + if module.check_mode: + module.exit_json(changed=True, msg='Packages would be installed.') args = [] emerge_flags = { From e3d608297d95a7c04d54303ee0abd6fda64dcde1 Mon Sep 17 00:00:00 2001 From: "Dustin C. Hatch" Date: Thu, 18 Jun 2015 13:55:03 -0500 Subject: [PATCH 042/157] packaging/os/portage: Handle noreplace in check mode The `--noreplace` argument to `emerge` is generally coupled with `--newuse` or `--changed-use`, and can be used instruct Portage to rebuild a package only if necessary. Simply checking to see if the package is already installed using `equery` is not sufficient to determine if any changes would be made, so that step is skipped when the `noreplace` module argument is specified. The module then falls back to parsing the output from `emerge` to determine if anything changed. In check mode, `emerge` is called with `--pretend`, so it produces different output, and the parsing fails to correctly infer that a change would be made. This commit adds another regular expression to check when running in check mode that matches the pretend output from `emerge`. Signed-off-by: Dustin C. Hatch --- packaging/os/portage.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index 712881a91ea..79db8d74740 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -300,13 +300,18 @@ def emerge_packages(module, packages): changed = True for line in out.splitlines(): if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line): + msg = 'Packages installed.' + break + elif module.check_mode and re.match(r'\[(binary|ebuild)', line): + msg = 'Packages would be installed.' break else: changed = False + msg = 'No packages installed.' 
module.exit_json( changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages installed.', + msg=msg, ) From 623a29cc0ecb00ddea636b89414517380a29d48b Mon Sep 17 00:00:00 2001 From: Phil Date: Thu, 18 Jun 2015 16:15:15 -0500 Subject: [PATCH 043/157] update to not auto-install PSCX - will use built-in powershell method instead for .zip files - added example for installing pscx as a pretask --- windows/win_unzip.ps1 | 52 +++++-------------------------------------- windows/win_unzip.py | 17 ++++++++++++-- 2 files changed, 21 insertions(+), 48 deletions(-) diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1 index 8e6db762fe1..35a55c811c4 100644 --- a/windows/win_unzip.ps1 +++ b/windows/win_unzip.ps1 @@ -1,7 +1,7 @@ #!powershell # This file is part of Ansible # -# Copyright 2014, Phil Schwartz +# Copyright 2015, Phil Schwartz # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -80,43 +80,13 @@ If ($ext -eq ".zip" -And $recurse -eq $false) { Fail-Json $result "Error unzipping $src to $dest" } } -# Need PSCX +# Requires PSCX Else { - # Requires PSCX, will be installed if it isn't found - # Pscx-3.2.0.msi - $url = "http://download-codeplex.sec.s-msft.com/Download/Release?ProjectName=pscx&DownloadId=923562&FileTime=130585918034470000&Build=20959" - $msi = "C:\Pscx-3.2.0.msi" - # Check if PSCX is installed $list = Get-Module -ListAvailable - # If not download it and install + If (-Not ($list -match "PSCX")) { - # Try install with chocolatey - Try { - cinst -force PSCX -y - $choco = $true - } - Catch { - $choco = $false - } - # install from downloaded msi if choco failed or is not present - If ($choco -eq $false) { - Try { - $client = New-Object System.Net.WebClient - $client.DownloadFile($url, $msi) - } - Catch { - Fail-Json $result "Error downloading PSCX from $url and saving as $dest" - } - Try { - Start-Process -FilePath msiexec.exe -ArgumentList "/i $msi /qb" -Verb Runas -PassThru -Wait | out-null - } - Catch { - Fail-Json $result "Error installing $msi" - } - } - Set-Attr $result.win_zip "pscx_status" "pscx was installed" - $installed = $true + Fail-Json "PowerShellCommunityExtensions PowerShell Module (PSCX) is required for non-'.zip' compressed archive types." 
} Else { Set-Attr $result.win_zip "pscx_status" "present" @@ -124,17 +94,7 @@ Else { # Import Try { - If ($installed) { - Try { - Import-Module 'C:\Program Files (x86)\Powershell Community Extensions\pscx3\pscx\pscx.psd1' - } - Catch { - Import-Module PSCX - } - } - Else { - Import-Module PSCX - } + Import-Module PSCX } Catch { Fail-Json $result "Error importing module PSCX" @@ -193,4 +153,4 @@ Set-Attr $result.win_unzip "src" $src.toString() Set-Attr $result.win_unzip "dest" $dest.toString() Set-Attr $result.win_unzip "recurse" $recurse.toString() -Exit-Json $result; +Exit-Json $result; \ No newline at end of file diff --git a/windows/win_unzip.py b/windows/win_unzip.py index 35093aa8c76..2c3c41df0b7 100644 --- a/windows/win_unzip.py +++ b/windows/win_unzip.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2014, Phil Schwartz +# (c) 2015, Phil Schwartz # # This file is part of Ansible # @@ -74,7 +74,7 @@ options: required: false default: false aliases: [] -author: Phil Schwartz +author: Phil Schwartz ''' EXAMPLES = ''' @@ -126,4 +126,17 @@ $ ansible -i hosts -m win_unzip -a "src=C:\\LibraryToUnzip.zip dest=C:\\Lib rm=t delay=15 timeout=600 state=started + +# Install PSCX to use for extracting a gz file + - name: Grab PSCX msi + win_get_url: + url: 'http://download-codeplex.sec.s-msft.com/Download/Release?ProjectName=pscx&DownloadId=923562&FileTime=130585918034470000&Build=20959' + dest: 'C:\\pscx.msi' + - name: Install PSCX + win_msi: + path: 'C:\\pscx.msi' + - name: Unzip gz log + win_unzip: + src: "C:\\Logs\\application-error-logs.gz" + dest: "C:\\ExtractedLogs\\application-error-logs" ''' From 5e5eec1806e406127484e18492f4c1d6b45a6341 Mon Sep 17 00:00:00 2001 From: Andrew Udvare Date: Thu, 18 Jun 2015 15:59:46 -0700 Subject: [PATCH 044/157] --usepkgonly does not imply --getbinpkg Add usepkg option to allow conditional building from source if binary packages are not found https://github.com/ansible/ansible-modules-extras/commit/5a6de937cb053d8366e06c01ec59b37c22d0629c#commitcomment-11755140 https://wiki.gentoo.org/wiki/Binary_package_guide#Using_binary_packages --- packaging/os/portage.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index ab96cb22e60..e62b0983033 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -267,14 +267,14 @@ def emerge_packages(module, packages): 'verbose': '--verbose', 'getbinpkg': '--getbinpkg', 'usepkgonly': '--usepkgonly', + 'usepkg': '--usepkg', } for flag, arg in emerge_flags.iteritems(): if p[flag]: args.append(arg) - # usepkgonly implies getbinpkg - if p['usepkgonly'] and not p['getbinpkg']: - args.append('--getbinpkg') + if 'usepkg' in p and 'usepkgonly' in p: + module.fail_json(msg='Use only one of usepkg, usepkgonly') cmd, (rc, out, err) = run_emerge(module, packages, *args) if rc != 0: @@ -406,6 +406,7 @@ def main(): sync=dict(default=None, choices=['yes', 'web']), getbinpkg=dict(default=None, choices=['yes']), usepkgonly=dict(default=None, choices=['yes']), + usepkg=dict(default=None, choices=['yes']), ), required_one_of=[['package', 'sync', 'depclean']], mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']], From 6b8c462d6605341318279a9ab11cc6843642e230 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Fri, 19 Jun 2015 12:40:56 +1000 Subject: [PATCH 045/157] Add GUIDELINES for AWS module development Starting point for a reference when doing pull request reviews. 
If something doesn't meet the guidelines we can point people at them. If something is bad but is not mentioned in the guidelines, we should add it here. --- cloud/amazon/GUIDELINES.md | 88 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 cloud/amazon/GUIDELINES.md diff --git a/cloud/amazon/GUIDELINES.md b/cloud/amazon/GUIDELINES.md new file mode 100644 index 00000000000..ee5aea90ef7 --- /dev/null +++ b/cloud/amazon/GUIDELINES.md @@ -0,0 +1,88 @@ +Guidelines for AWS modules +-------------------------- + +Naming your module +================== + +Base the name of the module on the part of AWS that +you actually use. (A good rule of thumb is to take +whatever module you use with boto as a starting point). + +Don't further abbreviate names - if something is a well +known abbreviation due to it being a major component of +AWS, that's fine, but don't create new ones independently +(e.g. VPC, ELB, etc. are fine) + +Using boto +========== + +Wrap the `import` statements in a try block and fail the +module later on if the import fails + +``` +try: + import boto + import boto.module.that.you.use + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + module_specific_parameter=dict(), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') +``` + + +Try and keep backward compatibility with relatively recent +versions of boto. That means that if want to implement some +functionality that uses a new feature of boto, it should only +fail if that feature actually needs to be run, with a message +saying which version of boto is needed. + +Use feature testing (e.g. `hasattr('boto.module', 'shiny_new_method')`) +to check whether boto supports a feature rather than version checking + +e.g. from the `ec2` module: +``` +if boto_supports_profile_name_arg(ec2): + params['instance_profile_name'] = instance_profile_name +else: + if instance_profile_name is not None: + module.fail_json( + msg="instance_profile_name parameter requires Boto version 2.5.0 or higher") +``` + + +Connecting to AWS +================= + +For EC2 you can just use + +``` +ec2 = ec2_connect(module) +``` + +For other modules, you should use `get_aws_connection_info` and then +`connect_to_aws`. To connect to an example `xyz` service: + +``` +region, ec2_url, aws_connect_params = get_aws_connection_info(module) +xyz = connect_to_aws(boto.xyz, region, **aws_connect_params) +``` + +The reason for using `get_aws_connection_info` and `connect_to_aws` +(and even `ec2_connect` uses those under the hood) rather than doing it +yourself is that they handle some of the more esoteric connection +options such as security tokens and boto profiles. 
From 628f2b98b69dba0fa741c87ddcd7c45108311509 Mon Sep 17 00:00:00 2001 From: Amir Moulavi Date: Fri, 19 Jun 2015 09:12:08 +0200 Subject: [PATCH 046/157] Implementation of EC2 AMI copy between regions --- cloud/amazon/ec2_ami_copy.py | 211 +++++++++++++++++++++++++++++++++++ 1 file changed, 211 insertions(+) create mode 100644 cloud/amazon/ec2_ami_copy.py diff --git a/cloud/amazon/ec2_ami_copy.py b/cloud/amazon/ec2_ami_copy.py new file mode 100644 index 00000000000..909ec4a9c7a --- /dev/null +++ b/cloud/amazon/ec2_ami_copy.py @@ -0,0 +1,211 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_ami_copy +short_description: copies AMI between AWS regions, return new image id +description: + - Copies AMI from a source region to a destination region. This module has a dependency on python-boto >= 2.5 +version_added: "1.7" +options: + source_region: + description: + - the source region that AMI should be copied from + required: true + default: null + region: + description: + - the destination region that AMI should be copied to + required: true + default: null + aliases: ['aws_region', 'ec2_region', 'dest_region'] + source_image_id: + description: + - the id of the image in source region that should be copied + required: true + default: null + name: + description: + - The name of the new image to copy + required: false + default: null + description: + description: + - An optional human-readable string describing the contents and purpose of the new AMI. + required: false + default: null + wait: + description: + - wait for the copied AMI to be in state 'available' before returning. 
+ required: false + default: "no" + choices: [ "yes", "no" ] + wait_timeout: + description: + - how long before wait gives up, in seconds + required: false + default: 1200 + tags: + description: + - a hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}' + required: false + default: null + +author: Amir Moulavi +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Basic AMI Copy +- local_action: + module: ec2_ami_copy + source_region: eu-west-1 + dest_region: us-east-1 + source_image_id: ami-xxxxxxx + name: SuperService-new-AMI + description: latest patch + tags: '{"Name":"SuperService-new-AMI", "type":"SuperService"}' + wait: yes + register: image_id +''' + + +import sys +import time + +try: + import boto + import boto.ec2 + from boto.vpc import VPCConnection + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + +def copy_image(module, ec2): + """ + Copies an AMI + + module : AnsibleModule object + ec2: authenticated ec2 connection object + """ + + source_region = module.params.get('source_region') + source_image_id = module.params.get('source_image_id') + name = module.params.get('name') + description = module.params.get('description') + tags = module.params.get('tags') + wait_timeout = int(module.params.get('wait_timeout')) + wait = module.params.get('wait') + + try: + params = {'source_region': source_region, + 'source_image_id': source_image_id, + 'name': name, + 'description': description + } + + image_id = ec2.copy_image(**params).image_id + except boto.exception.BotoServerError, e: + module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) + + img = wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait) + + img = wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait) + + register_tags_if_any(module, ec2, tags, image_id) + + module.exit_json(msg="AMI copy operation complete", image_id=image_id, state=img.state, changed=True) + + +# register tags to the copied AMI in dest_region +def register_tags_if_any(module, ec2, tags, image_id): + if tags: + try: + ec2.create_tags([image_id], tags) + except Exception as e: + module.fail_json(msg=str(e)) + + +# wait here until the image is copied (i.e. the state becomes available +def wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait): + wait_timeout = time.time() + wait_timeout + while wait and wait_timeout > time.time() and (img is None or img.state != 'available'): + img = ec2.get_image(image_id) + time.sleep(3) + if wait and wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="timed out waiting for image to be copied") + return img + + +# wait until the image is recognized. +def wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait): + for i in range(wait_timeout): + try: + return ec2.get_image(image_id) + except boto.exception.EC2ResponseError, e: + # This exception we expect initially right after registering the copy with EC2 API + if 'InvalidAMIID.NotFound' in e.error_code and wait: + time.sleep(1) + else: + # On any other exception we should fail + module.fail_json( + msg="Error while trying to find the new image. 
Using wait=yes and/or a longer wait_timeout may help: " + str( + e)) + else: + module.fail_json(msg="timed out waiting for image to be recognized") + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + source_region=dict(required=True), + source_image_id=dict(required=True), + name=dict(), + description=dict(default=""), + wait=dict(type='bool', default=False), + wait_timeout=dict(default=1200), + tags=dict(type='dict'))) + + module = AnsibleModule(argument_spec=argument_spec) + + try: + ec2 = ec2_connect(module) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + try: + region, ec2_url, boto_params = get_aws_connection_info(module) + vpc = connect_to_aws(boto.vpc, region, **boto_params) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + + if not region: + module.fail_json(msg="region must be specified") + + copy_image(module, ec2) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() + From 3f3a73da37c0c8e8425b2c41e7b9ee18f2851656 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Fri, 16 Jan 2015 15:59:17 +0100 Subject: [PATCH 047/157] Add sensu_check module --- monitoring/sensu_check.py | 328 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 328 insertions(+) create mode 100644 monitoring/sensu_check.py diff --git a/monitoring/sensu_check.py b/monitoring/sensu_check.py new file mode 100644 index 00000000000..b968304c34f --- /dev/null +++ b/monitoring/sensu_check.py @@ -0,0 +1,328 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Anders Ingemann +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: sensu_check +short_description: Manage Sensu checks +version_added: 2.0 +description: + - Manage the checks that should be run on a machine by I(Sensu). + - Most options do not have a default and will not be added to the check definition unless specified. + - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module, + - they are simply specified for your convenience. +options: + name: + description: + - The name of the check + - This is the key that is used to determine whether a check exists + required: true + state: + description: Whether the check should be present or not + choices: [ 'present', 'absent' ] + required: false + default: present + path: + description: + - Path to the json file of the check to be added/removed. + - Will be created if it does not exist (unless I(state=absent)). + - The parent folders need to exist when I(state=present), otherwise an error will be thrown + required: false + default: /etc/sensu/conf.d/checks.json + backup: + description: + - Create a backup file (if yes), including the timestamp information so + - you can get the original file back if you somehow clobbered it incorrectly. 
+ choices: [ 'yes', 'no' ] + required: false + default: no + command: + description: + - Path to the sensu check to run (not required when I(state=absent)) + required: true + handlers: + description: + - List of handlers to notify when the check fails + required: false + default: [] + subscribers: + description: + - List of subscribers/channels this check should run for + - See sensu_subscribers to subscribe a machine to a channel + required: false + default: [] + interval: + description: + - Check interval in seconds + required: false + default: null + timeout: + description: + - Timeout for the check + required: false + default: 10 + handle: + description: + - Whether the check should be handled or not + choices: [ 'yes', 'no' ] + required: false + default: yes + subdue_begin: + description: + - When to disable handling of check failures + required: false + default: null + subdue_end: + description: + - When to enable handling of check failures + required: false + default: null + dependencies: + description: + - Other checks this check depends on, if dependencies fail, + - handling of this check will be disabled + required: false + default: [] + metric: + description: Whether the check is a metric + choices: [ 'yes', 'no' ] + required: false + default: no + standalone: + description: + - Whether the check should be scheduled by the sensu client or server + - This option obviates the need for specifying the I(subscribers) option + choices: [ 'yes', 'no' ] + required: false + default: no + publish: + description: + - Whether the check should be scheduled at all. + - You can still issue it via the sensu api + choices: [ 'yes', 'no' ] + required: false + default: yes + occurrences: + description: + - Number of event occurrences before the handler should take action + required: false + default: 1 + refresh: + description: + - Number of seconds handlers should wait before taking second action + required: false + default: null + aggregate: + description: + - Classifies the check as an aggregate check, + - making it available via the aggregate API + choices: [ 'yes', 'no' ] + required: false + default: no + low_flap_threshold: + description: + - The low threshhold for flap detection + required: false + default: null + high_flap_threshold: + description: + - The low threshhold for flap detection + required: false + default: null +requirements: [ ] +author: Anders Ingemann +''' + +EXAMPLES = ''' +# Fetch metrics about the CPU load every 60 seconds, +# the sensu server has a handler called 'relay' which forwards stats to graphite +- name: get cpu metrics + sensu_check: name=cpu_load + command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb + metric=yes handlers=relay subscribers=common interval=60 + +# Check whether nginx is running +- name: check nginx process + sensu_check: name=nginx_running + command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid' + handlers=default subscribers=nginx interval=60 + +# Stop monitoring the disk capacity. +# Note that the check will still show up in the sensu dashboard, +# to remove it completely you need to issue a DELETE request to the sensu api. 
+- name: check disk + sensu_check: name=check_disk_capacity +''' + + +def sensu_check(module, path, name, state='present', backup=False): + changed = False + reasons = [] + + try: + import json + except ImportError: + import simplejson as json + + try: + with open(path) as stream: + config = json.load(stream) + except IOError as e: + if e.errno is 2: # File not found, non-fatal + if state == 'absent': + reasons.append('file did not exist and state is `absent\'') + return changed, reasons + config = {} + else: + module.fail_json(msg=str(e)) + except ValueError: + msg = '{path} contains invalid JSON'.format(path=path) + module.fail_json(msg=msg) + + if 'checks' not in config: + if state == 'absent': + reasons.append('`checks\' section did not exist and state is `absent\'') + return changed, reasons + config['checks'] = {} + changed = True + reasons.append('`checks\' section did not exist') + + if state == 'absent': + if name in config['checks']: + del config['checks'][name] + changed = True + reasons.append('check was present and state is `absent\'') + + if state == 'present': + if name not in config['checks']: + check = {} + config['checks'][name] = check + changed = True + reasons.append('check was absent and state is `present\'') + else: + check = config['checks'][name] + simple_opts = ['command', + 'handlers', + 'subscribers', + 'interval', + 'timeout', + 'handle', + 'dependencies', + 'standalone', + 'publish', + 'occurrences', + 'refresh', + 'aggregate', + 'low_flap_threshold', + 'high_flap_threshold', + ] + for opt in simple_opts: + if module.params[opt] is not None: + if opt not in check or check[opt] != module.params[opt]: + check[opt] = module.params[opt] + changed = True + reasons.append('`{opt}\' did not exist or was different'.format(opt=opt)) + else: + if opt in check: + del check[opt] + changed = True + reasons.append('`{opt}\' was removed'.format(opt=opt)) + + if module.params['metric']: + if 'type' not in check or check['type'] != 'metric': + check['type'] = 'metric' + changed = True + reasons.append('`type\' was not defined or not `metric\'') + if not module.params['metric'] and 'type' in check: + del check['type'] + changed = True + reasons.append('`type\' was defined') + + if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None: + subdue = {'begin': module.params['subdue_begin'], + 'end': module.params['subdue_end'], + } + if 'subdue' not in check or check['subdue'] != subdue: + check['subdue'] = subdue + changed = True + reasons.append('`subdue\' did not exist or was different') + else: + if 'subdue' in check: + del check['subdue'] + changed = True + reasons.append('`subdue\' was removed') + + if changed and not module.check_mode: + if backup: + module.backup_local(path) + try: + with open(path, 'w') as stream: + stream.write(json.dumps(config, indent=2) + '\n') + except IOError as e: + module.fail_json(msg=str(e)) + + return changed, reasons + + +def main(): + + arg_spec = {'name': {'type': 'str', 'required': True}, + 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'}, + 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, + 'backup': {'type': 'bool', 'default': 'no'}, + 'command': {'type': 'str'}, + 'handlers': {'type': 'list'}, + 'subscribers': {'type': 'list'}, + 'interval': {'type': 'int'}, + 'timeout': {'type': 'int'}, + 'handle': {'type': 'bool'}, + 'subdue_begin': {'type': 'str'}, + 'subdue_end': {'type': 'str'}, + 'dependencies': {'type': 'list'}, + 'metric': {'type': 'bool', 
'default': 'no'}, + 'standalone': {'type': 'bool'}, + 'publish': {'type': 'bool'}, + 'occurrences': {'type': 'int'}, + 'refresh': {'type': 'int'}, + 'aggregate': {'type': 'bool'}, + 'low_flap_threshold': {'type': 'int'}, + 'high_flap_threshold': {'type': 'int'}, + } + + required_together = [['subdue_begin', 'subdue_end']] + + module = AnsibleModule(argument_spec=arg_spec, + required_together=required_together, + supports_check_mode=True) + if module.params['state'] != 'absent' and module.params['command'] is None: + module.fail_json(msg="missing required arguments: %s" % ",".join(['command'])) + + path = module.params['path'] + name = module.params['name'] + state = module.params['state'] + backup = module.params['backup'] + + changed, reasons = sensu_check(module, path, name, state, backup) + + module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons) + +from ansible.module_utils.basic import * +main() From 35b6bc417d6b825189486a094b833c226ca30bb9 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 19 Jun 2015 11:55:05 +0200 Subject: [PATCH 048/157] cloudstack: new module cs_facts --- cloud/cloudstack/cs_facts.py | 221 +++++++++++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) create mode 100644 cloud/cloudstack/cs_facts.py diff --git a/cloud/cloudstack/cs_facts.py b/cloud/cloudstack/cs_facts.py new file mode 100644 index 00000000000..f8749834120 --- /dev/null +++ b/cloud/cloudstack/cs_facts.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_facts +short_description: Gather facts on instances of Apache CloudStack based clouds. +description: + - This module fetches data from the metadata API in CloudStack. The module must be called from within the instance itself. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + filter: + description: + - Filter for a specific fact. + required: false + default: null + choices: + - cloudstack_service_offering + - cloudstack_availability_zone + - cloudstack_public_hostname + - cloudstack_public_ipv4 + - cloudstack_local_hostname + - cloudstack_local_ipv4 + - cloudstack_instance_id + - cloudstack_user_data +requirements: [ 'yaml' ] +''' + +EXAMPLES = ''' +# Gather all facts on instances +- name: Gather cloudstack facts + cs_facts: + +# Gather specific fact on instances +- name: Gather cloudstack facts + cs_facts: filter=cloudstack_instance_id +''' + +RETURN = ''' +--- +cloudstack_availability_zone: + description: zone the instance is deployed in. + returned: success + type: string + sample: ch-gva-2 +cloudstack_instance_id: + description: UUID of the instance. + returned: success + type: string + sample: ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 +cloudstack_local_hostname: + description: local hostname of the instance. 
+ returned: success + type: string + sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 +cloudstack_local_ipv4: + description: local IPv4 of the instance. + returned: success + type: string + sample: 185.19.28.35 +cloudstack_public_hostname: + description: public hostname of the instance. + returned: success + type: string + sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 +cloudstack_public_ipv4: + description: public IPv4 of the instance. + returned: success + type: string + sample: 185.19.28.35 +cloudstack_service_offering: + description: service offering of the instance. + returned: success + type: string + sample: Micro 512mb 1cpu +cloudstack_user_data: + description: data of the instance provided by users. + returned: success + type: dict + sample: { "bla": "foo" } +''' + +import os + +try: + import yaml + has_lib_yaml = True +except ImportError: + has_lib_yaml = False + +CS_METADATA_BASE_URL = "http://%s/latest/meta-data" +CS_USERDATA_BASE_URL = "http://%s/latest/user-data" + +class CloudStackFacts(object): + + def __init__(self): + self.facts = ansible_facts(module) + self.api_ip = None + self.fact_paths = { + 'cloudstack_service_offering': 'service-offering', + 'cloudstack_availability_zone': 'availability-zone', + 'cloudstack_public_hostname': 'public-hostname', + 'cloudstack_public_ipv4': 'public-ipv4', + 'cloudstack_local_hostname': 'local-hostname', + 'cloudstack_local_ipv4': 'local-ipv4', + 'cloudstack_instance_id': 'instance-id' + } + + def run(self): + result = {} + filter = module.params.get('filter') + if not filter: + for key,path in self.fact_paths.iteritems(): + result[key] = self._fetch(CS_METADATA_BASE_URL + "/" + path) + result['cloudstack_user_data'] = self._get_user_data_json() + else: + if filter == 'cloudstack_user_data': + result['cloudstack_user_data'] = self._get_user_data_json() + elif filter in self.fact_paths: + result[filter] = self._fetch(CS_METADATA_BASE_URL + "/" + self.fact_paths[filter]) + return result + + + def _get_user_data_json(self): + try: + # this data come form users, we try what we can to parse it... 
+ return yaml.load(self._fetch(CS_USERDATA_BASE_URL)) + except: + return None + + + def _fetch(self, path): + api_ip = self._get_api_ip() + if not api_ip: + return None + api_url = path % api_ip + (response, info) = fetch_url(module, api_url, force=True) + if response: + data = response.read() + else: + data = None + return data + + + def _get_dhcp_lease_file(self): + """Return the path of the lease file.""" + default_iface = self.facts['default_ipv4']['interface'] + dhcp_lease_file_locations = [ + '/var/lib/dhcp/dhclient.%s.leases' % default_iface, # debian / ubuntu + '/var/lib/dhclient/dhclient-%s.leases' % default_iface, # centos 6 + '/var/lib/dhclient/dhclient--%s.lease' % default_iface, # centos 7 + '/var/db/dhclient.leases.%s' % default_iface, # openbsd + ] + for file_path in dhcp_lease_file_locations: + if os.path.exists(file_path): + return file_path + module.fail_json(msg="Could not find dhclient leases file.") + + + def _get_api_ip(self): + """Return the IP of the DHCP server.""" + if not self.api_ip: + dhcp_lease_file = self._get_dhcp_lease_file() + for line in open(dhcp_lease_file): + if 'dhcp-server-identifier' in line: + # get IP of string "option dhcp-server-identifier 185.19.28.176;" + line = line.translate(None, ';') + self.api_ip = line.split()[2] + break + if not self.api_ip: + module.fail_json(msg="No dhcp-server-identifier found in leases file.") + return self.api_ip + + +def main(): + global module + module = AnsibleModule( + argument_spec = dict( + filter = dict(default=None, choices=[ + 'cloudstack_service_offering', + 'cloudstack_availability_zone', + 'cloudstack_public_hostname', + 'cloudstack_public_ipv4', + 'cloudstack_local_hostname', + 'cloudstack_local_ipv4', + 'cloudstack_instance_id', + 'cloudstack_user_data', + ]), + ), + supports_check_mode=False + ) + + if not has_lib_yaml: + module.fail_json(msg="missing python library: yaml") + + cs_facts = CloudStackFacts().run() + cs_facts_result = dict(changed=False, ansible_facts=cs_facts) + module.exit_json(**cs_facts_result) + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +from ansible.module_utils.facts import * +main() From d0cf9617a54a49ecf819076555cce931a0f71683 Mon Sep 17 00:00:00 2001 From: Dag Wieers Date: Fri, 19 Jun 2015 13:30:29 +0200 Subject: [PATCH 049/157] Spurious newline could corrupt payload Due to a spurious newline we corrupted the payload. It depends on the order of the headers and if there were headers added by vSphere. The Accept header was also not needed. 
--- cloud/vmware/vsphere_copy | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_copy b/cloud/vmware/vsphere_copy index f85beab481d..0ca9780c008 100644 --- a/cloud/vmware/vsphere_copy +++ b/cloud/vmware/vsphere_copy @@ -120,11 +120,10 @@ def main(): atexit.register(conn.close) remote_path = vmware_path(datastore, datacenter, dest) - auth = base64.encodestring('%s:%s' % (login, password)) + auth = base64.encodestring('%s:%s' % (login, password)).rstrip() headers = { "Content-Type": "application/octet-stream", "Content-Length": str(len(data)), - "Accept": "text/plain", "Authorization": "Basic %s" % auth, } From e203087aaabea0c0cefe6ae3d1b072ecbde84cf8 Mon Sep 17 00:00:00 2001 From: Andrew Udvare Date: Fri, 19 Jun 2015 06:04:56 -0700 Subject: [PATCH 050/157] Fix comparison --- packaging/os/portage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index e62b0983033..1043679585b 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -273,7 +273,7 @@ def emerge_packages(module, packages): if p[flag]: args.append(arg) - if 'usepkg' in p and 'usepkgonly' in p: + if p['usepkg'] and p['usepkgonly']: module.fail_json(msg='Use only one of usepkg, usepkgonly') cmd, (rc, out, err) = run_emerge(module, packages, *args) From 35a4e70deef1860eb944bdc73d6d8ca19af0444d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Jun 2015 12:46:16 -0400 Subject: [PATCH 051/157] minor fixes --- notification/hall.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/notification/hall.py b/notification/hall.py index 7c76e52379f..05c1a981b73 100755 --- a/notification/hall.py +++ b/notification/hall.py @@ -18,18 +18,18 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . - + DOCUMENTATION = """ module: hall short_description: Send notification to Hall description: - - The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notication messages to rooms. -version_added: 1.6 -author: Billy Kimble + - "The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notication messages to rooms." +version_added: "2.0" +author: Billy Kimble (@bkimble) options: room_token: description: - - Room token provided to you by setting up the Ansible room integation on U(https://hall.com) + - "Room token provided to you by setting up the Ansible room integation on U(https://hall.com)" required: true msg: description: @@ -41,12 +41,12 @@ options: required: true picture: description: - - The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627) + - "The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)" required: false -""" +""" EXAMPLES = """ -- name: Send Hall notifiation +- name: Send Hall notifiation local_action: module: hall room_token: @@ -57,7 +57,7 @@ EXAMPLES = """ when: ec2.instances|length > 0 local_action: module: hall - room_token: + room_token: title: Server Creation msg: "Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region." 
with_items: ec2.instances @@ -66,7 +66,7 @@ EXAMPLES = """ HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s' def send_request_to_hall(module, room_token, payload): - headers = {'Content-Type': 'application/json'} + headers = {'Content-Type': 'application/json'} payload=module.jsonify(payload) api_endpoint = HALL_API_ENDPOINT % (room_token) response, info = fetch_url(module, api_endpoint, data=payload, headers=headers) @@ -83,7 +83,7 @@ def main(): picture = dict(type='str', default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'), ) ) - + room_token = module.params['room_token'] message = module.params['msg'] title = module.params['title'] From 1604382538db616867207bd1df1b05d893010213 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 11:04:25 -0400 Subject: [PATCH 052/157] monior docfixes added extensino to vsphere_copy so it actually installs --- cloud/amazon/ec2_ami_copy.py | 5 +---- cloud/amazon/ec2_eni.py | 6 +++--- cloud/amazon/ec2_eni_facts.py | 4 ++-- cloud/vmware/{vsphere_copy => vsphere_copy.py} | 4 ++-- 4 files changed, 8 insertions(+), 11 deletions(-) rename cloud/vmware/{vsphere_copy => vsphere_copy.py} (96%) diff --git a/cloud/amazon/ec2_ami_copy.py b/cloud/amazon/ec2_ami_copy.py index 909ec4a9c7a..ff9bde88022 100644 --- a/cloud/amazon/ec2_ami_copy.py +++ b/cloud/amazon/ec2_ami_copy.py @@ -20,24 +20,21 @@ module: ec2_ami_copy short_description: copies AMI between AWS regions, return new image id description: - Copies AMI from a source region to a destination region. This module has a dependency on python-boto >= 2.5 -version_added: "1.7" +version_added: "2.0" options: source_region: description: - the source region that AMI should be copied from required: true - default: null region: description: - the destination region that AMI should be copied to required: true - default: null aliases: ['aws_region', 'ec2_region', 'dest_region'] source_image_id: description: - the id of the image in source region that should be copied required: true - default: null name: description: - The name of the new image to copy diff --git a/cloud/amazon/ec2_eni.py b/cloud/amazon/ec2_eni.py index 2b34e9b9405..9e878e7d558 100644 --- a/cloud/amazon/ec2_eni.py +++ b/cloud/amazon/ec2_eni.py @@ -25,13 +25,13 @@ options: eni_id: description: - The ID of the ENI - required = false - default = null + required: false + default: null instance_id: description: - Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'. required: false - default: null + default: null private_ip_address: description: - Private IP address. diff --git a/cloud/amazon/ec2_eni_facts.py b/cloud/amazon/ec2_eni_facts.py index 76347c84261..981358c33af 100644 --- a/cloud/amazon/ec2_eni_facts.py +++ b/cloud/amazon/ec2_eni_facts.py @@ -25,8 +25,8 @@ options: eni_id: description: - The ID of the ENI. Pass this option to gather facts about a particular ENI, otherwise, all ENIs are returned. - required = false - default = null + required: false + default: null extends_documentation_fragment: aws ''' diff --git a/cloud/vmware/vsphere_copy b/cloud/vmware/vsphere_copy.py similarity index 96% rename from cloud/vmware/vsphere_copy rename to cloud/vmware/vsphere_copy.py index 0ca9780c008..7c044a7d51a 100644 --- a/cloud/vmware/vsphere_copy +++ b/cloud/vmware/vsphere_copy.py @@ -55,8 +55,8 @@ options: - The file to push to the datastore on the vCenter server. 
required: true notes: - - This module ought to be run from a system that can access vCenter directly and has the file to transfer. - It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to). + - "This module ought to be run from a system that can access vCenter directly and has the file to transfer. + It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)." - Tested on vSphere 5.5 ''' From 4b29146c4d84a94c35e9f1bd763fcb85820e801c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 19 Jun 2015 08:59:19 -0700 Subject: [PATCH 053/157] be explicit about urllib import and remove conditional urllib(2) import urllib and urllib2 have been in the python stdlib since at least python-2.3. There's no reason to conditionalize it. Fixes https://github.com/ansible/ansible/issues/11322 --- monitoring/airbrake_deployment.py | 5 +++-- monitoring/newrelic_deployment.py | 5 +++-- monitoring/rollbar_deployment.py | 1 + network/citrix/netscaler.py | 4 ++-- network/dnsmadeeasy.py | 4 +++- notification/flowdock.py | 5 +++-- notification/grove.py | 2 ++ notification/hipchat.py | 5 +++-- notification/nexmo.py | 1 + notification/sendgrid.py | 5 +---- notification/twilio.py | 5 +---- 11 files changed, 23 insertions(+), 19 deletions(-) diff --git a/monitoring/airbrake_deployment.py b/monitoring/airbrake_deployment.py index 3b54e55e751..a58df024182 100644 --- a/monitoring/airbrake_deployment.py +++ b/monitoring/airbrake_deployment.py @@ -61,8 +61,7 @@ options: default: 'yes' choices: ['yes', 'no'] -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] +requirements: [] ''' EXAMPLES = ''' @@ -72,6 +71,8 @@ EXAMPLES = ''' revision=4.2 ''' +import urllib + # =========================================== # Module execution. # diff --git a/monitoring/newrelic_deployment.py b/monitoring/newrelic_deployment.py index 832e467dea0..3d9bc6c0ec3 100644 --- a/monitoring/newrelic_deployment.py +++ b/monitoring/newrelic_deployment.py @@ -72,8 +72,7 @@ options: choices: ['yes', 'no'] version_added: 1.5.1 -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] +requirements: [] ''' EXAMPLES = ''' @@ -83,6 +82,8 @@ EXAMPLES = ''' revision=1.0 ''' +import urllib + # =========================================== # Module execution. # diff --git a/monitoring/rollbar_deployment.py b/monitoring/rollbar_deployment.py index 43e2aa00722..060193b78a5 100644 --- a/monitoring/rollbar_deployment.py +++ b/monitoring/rollbar_deployment.py @@ -76,6 +76,7 @@ EXAMPLES = ''' comment='Test Deploy' ''' +import urllib def main(): diff --git a/network/citrix/netscaler.py b/network/citrix/netscaler.py index 61bc35356e5..384a625bdca 100644 --- a/network/citrix/netscaler.py +++ b/network/citrix/netscaler.py @@ -81,7 +81,7 @@ options: default: 'yes' choices: ['yes', 'no'] -requirements: [ "urllib", "urllib2" ] +requirements: [] author: "Nandor Sivok (@dominis)" ''' @@ -99,7 +99,7 @@ ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=api import base64 import socket - +import urllib class netscaler(object): diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index fcc7232a0da..cce7bd10082 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -86,7 +86,7 @@ notes: - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP. 
- This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks. -requirements: [ urllib, urllib2, hashlib, hmac ] +requirements: [ hashlib, hmac ] author: "Brice Burgess (@briceburg)" ''' @@ -113,6 +113,8 @@ EXAMPLES = ''' # DNSMadeEasy module specific support methods. # +import urllib + IMPORT_ERROR = None try: import json diff --git a/notification/flowdock.py b/notification/flowdock.py index 7c42e58644d..34dad8db375 100644 --- a/notification/flowdock.py +++ b/notification/flowdock.py @@ -85,8 +85,7 @@ options: choices: ['yes', 'no'] version_added: 1.5.1 -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] +requirements: [ ] ''' EXAMPLES = ''' @@ -104,6 +103,8 @@ EXAMPLES = ''' tags=tag1,tag2,tag3 ''' +import urllib + # =========================================== # Module execution. # diff --git a/notification/grove.py b/notification/grove.py index 85601d1cc78..4e4a0b5b684 100644 --- a/notification/grove.py +++ b/notification/grove.py @@ -49,6 +49,8 @@ EXAMPLES = ''' message=deployed {{ target }} ''' +import urllib + BASE_URL = 'https://grove.io/api/notice/%s/' # ============================================================== diff --git a/notification/hipchat.py b/notification/hipchat.py index 2498c11848c..32689965cf9 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -62,8 +62,7 @@ options: version_added: 1.6.0 -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] +requirements: [ ] author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)" ''' @@ -75,6 +74,8 @@ EXAMPLES = ''' # HipChat module specific support methods. # +import urllib + DEFAULT_URI = "https://api.hipchat.com/v1" MSG_URI_V1 = "/rooms/message" diff --git a/notification/nexmo.py b/notification/nexmo.py index d0c3d05e65c..89a246c0d90 100644 --- a/notification/nexmo.py +++ b/notification/nexmo.py @@ -71,6 +71,7 @@ EXAMPLES = """ msg: "{{ inventory_hostname }} completed" """ +import urllib NEXMO_API = 'https://rest.nexmo.com/sms/json' diff --git a/notification/sendgrid.py b/notification/sendgrid.py index 78806687e0b..7a2ee3ad657 100644 --- a/notification/sendgrid.py +++ b/notification/sendgrid.py @@ -84,10 +84,7 @@ EXAMPLES = ''' # ======================================= # sendgrid module support methods # -try: - import urllib, urllib2 -except ImportError: - module.fail_json(msg="urllib and urllib2 are required") +import urllib, urllib2 import base64 diff --git a/notification/twilio.py b/notification/twilio.py index e9ec5bcf51e..a2dd77fb2c0 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -104,10 +104,7 @@ EXAMPLES = ''' # ======================================= # twilio module support methods # -try: - import urllib, urllib2 -except ImportError: - module.fail_json(msg="urllib and urllib2 are required") +import urllib, urllib2 import base64 From 1659af1541648765d955a48be9802703dacc052b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 12:05:50 -0400 Subject: [PATCH 054/157] made sensu_check 2.4 friendly --- monitoring/sensu_check.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/monitoring/sensu_check.py b/monitoring/sensu_check.py index b968304c34f..eb9d0b7bf04 100644 --- a/monitoring/sensu_check.py +++ b/monitoring/sensu_check.py @@ -183,8 +183,8 @@ def sensu_check(module, path, name, state='present', backup=False): import simplejson as json try: - with open(path) as stream: - config = json.load(stream) + 
stream = open(path, 'r') + config = json.load(stream.read()) except IOError as e: if e.errno is 2: # File not found, non-fatal if state == 'absent': @@ -196,6 +196,9 @@ def sensu_check(module, path, name, state='present', backup=False): except ValueError: msg = '{path} contains invalid JSON'.format(path=path) module.fail_json(msg=msg) + finally: + if stream: + stream.close() if 'checks' not in config: if state == 'absent': @@ -274,10 +277,13 @@ def sensu_check(module, path, name, state='present', backup=False): if backup: module.backup_local(path) try: - with open(path, 'w') as stream: - stream.write(json.dumps(config, indent=2) + '\n') + stream = open(path, 'w') + stream.write(json.dumps(config, indent=2) + '\n') except IOError as e: module.fail_json(msg=str(e)) + finally: + if stream: + stream.close() return changed, reasons From dd6e8f354aaeeeaccc1566ab14cfd368d6ec1f72 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 19 Jun 2015 09:07:04 -0700 Subject: [PATCH 055/157] Modify a few more modules to not conditionalize urllib(2) import. --- monitoring/librato_annotation.py | 7 +------ notification/sendgrid.py | 3 ++- notification/twilio.py | 3 ++- notification/typetalk.py | 16 +++++----------- 4 files changed, 10 insertions(+), 19 deletions(-) diff --git a/monitoring/librato_annotation.py b/monitoring/librato_annotation.py index 88d3bb81f7b..c606dfdc9a0 100644 --- a/monitoring/librato_annotation.py +++ b/monitoring/librato_annotation.py @@ -31,7 +31,6 @@ description: version_added: "1.6" author: "Seth Edwards (@sedward)" requirements: - - urllib2 - base64 options: user: @@ -107,11 +106,7 @@ EXAMPLES = ''' ''' -try: - import urllib2 - HAS_URLLIB2 = True -except ImportError: - HAS_URLLIB2 = False +import urllib2 def post_annotation(module): user = module.params['user'] diff --git a/notification/sendgrid.py b/notification/sendgrid.py index 7a2ee3ad657..e1ae7b7749f 100644 --- a/notification/sendgrid.py +++ b/notification/sendgrid.py @@ -84,7 +84,8 @@ EXAMPLES = ''' # ======================================= # sendgrid module support methods # -import urllib, urllib2 +import urllib +import urllib2 import base64 diff --git a/notification/twilio.py b/notification/twilio.py index a2dd77fb2c0..ee12d987e9e 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -104,7 +104,8 @@ EXAMPLES = ''' # ======================================= # twilio module support methods # -import urllib, urllib2 +import urllib +import urllib2 import base64 diff --git a/notification/typetalk.py b/notification/typetalk.py index 638f97ae530..002c8b5cc85 100644 --- a/notification/typetalk.py +++ b/notification/typetalk.py @@ -25,7 +25,7 @@ options: description: - message body required: true -requirements: [ urllib, urllib2, json ] +requirements: [ json ] author: "Takashi Someda (@tksmd)" ''' @@ -33,15 +33,9 @@ EXAMPLES = ''' - typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed" ''' -try: - import urllib -except ImportError: - urllib = None +import urllib -try: - import urllib2 -except ImportError: - urllib2 = None +import urllib2 try: import json @@ -96,8 +90,8 @@ def main(): supports_check_mode=False ) - if not (urllib and urllib2 and json): - module.fail_json(msg="urllib, urllib2 and json modules are required") + if not json: + module.fail_json(msg="json module is required") client_id = module.params["client_id"] client_secret = module.params["client_secret"] From eeb9d3481256b038e69638618f9d3a566e24b6c6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 12:10:14 
-0400 Subject: [PATCH 056/157] also fixed exceptions --- monitoring/sensu_check.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/sensu_check.py b/monitoring/sensu_check.py index eb9d0b7bf04..5c932a1d303 100644 --- a/monitoring/sensu_check.py +++ b/monitoring/sensu_check.py @@ -185,7 +185,7 @@ def sensu_check(module, path, name, state='present', backup=False): try: stream = open(path, 'r') config = json.load(stream.read()) - except IOError as e: + except IOError, e: if e.errno is 2: # File not found, non-fatal if state == 'absent': reasons.append('file did not exist and state is `absent\'') @@ -279,7 +279,7 @@ def sensu_check(module, path, name, state='present', backup=False): try: stream = open(path, 'w') stream.write(json.dumps(config, indent=2) + '\n') - except IOError as e: + except IOError, e: module.fail_json(msg=str(e)) finally: if stream: From 286bc3d9dc80e2bb3215de823ab5ed6c2a35342c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 12:13:43 -0400 Subject: [PATCH 057/157] forgot finally 2.4 syntax --- monitoring/sensu_check.py | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/monitoring/sensu_check.py b/monitoring/sensu_check.py index 5c932a1d303..a1bd36ca665 100644 --- a/monitoring/sensu_check.py +++ b/monitoring/sensu_check.py @@ -183,19 +183,20 @@ def sensu_check(module, path, name, state='present', backup=False): import simplejson as json try: - stream = open(path, 'r') - config = json.load(stream.read()) - except IOError, e: - if e.errno is 2: # File not found, non-fatal - if state == 'absent': - reasons.append('file did not exist and state is `absent\'') - return changed, reasons - config = {} - else: - module.fail_json(msg=str(e)) - except ValueError: - msg = '{path} contains invalid JSON'.format(path=path) - module.fail_json(msg=msg) + try: + stream = open(path, 'r') + config = json.load(stream.read()) + except IOError, e: + if e.errno is 2: # File not found, non-fatal + if state == 'absent': + reasons.append('file did not exist and state is `absent\'') + return changed, reasons + config = {} + else: + module.fail_json(msg=str(e)) + except ValueError: + msg = '{path} contains invalid JSON'.format(path=path) + module.fail_json(msg=msg) finally: if stream: stream.close() @@ -277,10 +278,11 @@ def sensu_check(module, path, name, state='present', backup=False): if backup: module.backup_local(path) try: - stream = open(path, 'w') - stream.write(json.dumps(config, indent=2) + '\n') - except IOError, e: - module.fail_json(msg=str(e)) + try: + stream = open(path, 'w') + stream.write(json.dumps(config, indent=2) + '\n') + except IOError, e: + module.fail_json(msg=str(e)) finally: if stream: stream.close() From edc423a18a800ae4b6b30ff6a7dae444a66f10e5 Mon Sep 17 00:00:00 2001 From: Christopher Troup Date: Thu, 18 Jun 2015 18:08:50 -0400 Subject: [PATCH 058/157] Add support for creating and deleting Route53 hosted zones Supports both private (per-VPC) and public hosted zones. 
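Illustrative usage of the new module (not part of the diff below; the zone and VPC values
are placeholders, and the command option shown here is renamed to state=present/absent in
PATCH 060 further down)::

    # Public hosted zone
    - route53_zone:
        zone: example.com.
        command: create
        comment: created by ansible

    # Private hosted zone attached to a VPC
    - route53_zone:
        zone: internal.example.com.
        command: create
        vpc_id: vpc-1a2b3c4d
        vpc_region: us-east-1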
--- cloud/amazon/route53_zone.py | 148 +++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 cloud/amazon/route53_zone.py diff --git a/cloud/amazon/route53_zone.py b/cloud/amazon/route53_zone.py new file mode 100644 index 00000000000..01eb11eb672 --- /dev/null +++ b/cloud/amazon/route53_zone.py @@ -0,0 +1,148 @@ +#!/usr/bin/python + +DOCUMENTATION = ''' +module: route53_zone +short_description: add or delete Route53 zones +description: + - Creates and deletes Route53 private and public zones +options: + zone: + description: + - The DNS zone record (eg: foo.com.) + required: true + default: null + command: + description: + - whether or not the zone should exist or not + required: false + default: true + vpc_id: + description: + - The VPC ID the zone should be a part of (if this is going to be a private zone) + required: false + default: null + vpc_region: + description: + - The VPC Region the zone should be a part of (if this is going to be a private zone) + required: false + default: null + comment: + description: + - Comment associated with the zone + required: false + default: '' +extends_documentation_fragment: aws +''' + +import time + +try: + import boto + import boto.ec2 + from boto import route53 + from boto.route53 import Route53Connection + from boto.route53.zone import Zone + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + zone=dict(required=True), + command=dict(default='create', choices=['create', 'delete']), + vpc_id=dict(default=None), + vpc_region=dict(default=None), + comment=dict(default=''), + ) + ) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + zone_in = module.params.get('zone').lower() + command = module.params.get('command').lower() + vpc_id = module.params.get('vpc_id') + vpc_region = module.params.get('vpc_region') + comment = module.params.get('comment') + + private_zone = vpc_id is not None and vpc_region is not None + + _, _, aws_connect_kwargs = get_aws_connection_info(module) + + # connect to the route53 endpoint + try: + conn = Route53Connection(**aws_connect_kwargs) + except boto.exception.BotoServerError, e: + module.fail_json(msg=e.error_message) + + results = conn.get_all_hosted_zones() + zones = {} + + for r53zone in results['ListHostedZonesResponse']['HostedZones']: + zone_id = r53zone['Id'].replace('/hostedzone/', '') + zone_details = conn.get_hosted_zone(zone_id)['GetHostedZoneResponse'] + if vpc_id and 'VPCs' in zone_details: + # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882 + if isinstance(zone_details['VPCs'], dict): + if zone_details['VPCs']['VPC']['VPCId'] == vpc_id: + zones[r53zone['Name']] = zone_id + else: # Forward compatibility for when boto fixes that bug + if vpc_id in [v['VPCId'] for v in zone_details['VPCs']]: + zones[r53zone['Name']] = zone_id + else: + zones[r53zone['Name']] = zone_id + + record = { + 'private_zone': private_zone, + 'vpc_id': vpc_id, + 'vpc_region': vpc_region, + 'comment': comment, + } + + if command == 'create' and zone_in in zones: + if private_zone: + details = conn.get_hosted_zone(zones[zone_in]) + + if 'VPCs' not in details['GetHostedZoneResponse']: + module.fail_json( + msg="Can't change VPC from public to private" + ) + + vpc_details = details['GetHostedZoneResponse']['VPCs']['VPC'] + current_vpc_id = vpc_details['VPCId'] + current_vpc_region = vpc_details['VPCRegion'] + + if current_vpc_id != vpc_id: + module.fail_json( + msg="Can't change 
VPC ID once a zone has been created" + ) + if current_vpc_region != vpc_region: + module.fail_json( + msg="Can't change VPC Region once a zone has been created" + ) + + record['zone_id'] = zones[zone_in] + record['name'] = zone_in + module.exit_json(changed=False, set=record) + + elif command == 'create': + result = conn.create_hosted_zone(zone_in, **record) + hosted_zone = result['CreateHostedZoneResponse']['HostedZone'] + zone_id = hosted_zone['Id'].replace('/hostedzone/', '') + record['zone_id'] = zone_id + record['name'] = zone_in + module.exit_json(changed=True, set=record) + + elif command == 'delete' and zone_in in zones: + conn.delete_hosted_zone(zones[zone_in]) + module.exit_json(changed=True) + + elif command == 'delete': + module.exit_json(changed=False) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() From 86ae387fdcb7cedf5f658d55e36ddbbf31c59631 Mon Sep 17 00:00:00 2001 From: Christopher Troup Date: Fri, 19 Jun 2015 11:14:27 -0400 Subject: [PATCH 059/157] Update documentation to include usual fields - Adds version_added - Adds author - Removed default: null from a required field --- cloud/amazon/route53_zone.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/route53_zone.py b/cloud/amazon/route53_zone.py index 01eb11eb672..ca9cca8d9f6 100644 --- a/cloud/amazon/route53_zone.py +++ b/cloud/amazon/route53_zone.py @@ -5,12 +5,12 @@ module: route53_zone short_description: add or delete Route53 zones description: - Creates and deletes Route53 private and public zones +version_added: "2.0" options: zone: description: - The DNS zone record (eg: foo.com.) required: true - default: null command: description: - whether or not the zone should exist or not @@ -32,6 +32,7 @@ options: required: false default: '' extends_documentation_fragment: aws +author: "Christopher Troup (@minichate)" ''' import time From 8c643498d37a7c85358a746d6f5467f6d3c34d60 Mon Sep 17 00:00:00 2001 From: Christopher Troup Date: Fri, 19 Jun 2015 11:22:01 -0400 Subject: [PATCH 060/157] Use state: present|absent rather than command: create|delete --- cloud/amazon/route53_zone.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/cloud/amazon/route53_zone.py b/cloud/amazon/route53_zone.py index ca9cca8d9f6..2383563fafe 100644 --- a/cloud/amazon/route53_zone.py +++ b/cloud/amazon/route53_zone.py @@ -11,11 +11,12 @@ options: description: - The DNS zone record (eg: foo.com.) 
required: true - command: + state: description: - whether or not the zone should exist or not required: false default: true + choices: [ "present", "absent" ] vpc_id: description: - The VPC ID the zone should be a part of (if this is going to be a private zone) @@ -52,7 +53,7 @@ def main(): module = AnsibleModule( argument_spec=dict( zone=dict(required=True), - command=dict(default='create', choices=['create', 'delete']), + state=dict(default='present', choices=['present', 'absent']), vpc_id=dict(default=None), vpc_region=dict(default=None), comment=dict(default=''), @@ -63,7 +64,7 @@ def main(): module.fail_json(msg='boto required for this module') zone_in = module.params.get('zone').lower() - command = module.params.get('command').lower() + state = module.params.get('state').lower() vpc_id = module.params.get('vpc_id') vpc_region = module.params.get('vpc_region') comment = module.params.get('comment') @@ -102,7 +103,7 @@ def main(): 'comment': comment, } - if command == 'create' and zone_in in zones: + if state == 'present' and zone_in in zones: if private_zone: details = conn.get_hosted_zone(zones[zone_in]) @@ -128,7 +129,7 @@ def main(): record['name'] = zone_in module.exit_json(changed=False, set=record) - elif command == 'create': + elif state == 'present': result = conn.create_hosted_zone(zone_in, **record) hosted_zone = result['CreateHostedZoneResponse']['HostedZone'] zone_id = hosted_zone['Id'].replace('/hostedzone/', '') @@ -136,11 +137,11 @@ def main(): record['name'] = zone_in module.exit_json(changed=True, set=record) - elif command == 'delete' and zone_in in zones: + elif state == 'absent' and zone_in in zones: conn.delete_hosted_zone(zones[zone_in]) module.exit_json(changed=True) - elif command == 'delete': + elif state == 'absent': module.exit_json(changed=False) from ansible.module_utils.basic import * From fc43c3a8fd57849133177fd9782e8abe2be467c4 Mon Sep 17 00:00:00 2001 From: Robert Buchholz Date: Wed, 25 Mar 2015 16:14:13 +0100 Subject: [PATCH 061/157] patch: Add binary option that maps to --binary to handle CLRF patches --- files/patch.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/files/patch.py b/files/patch.py index c1a61ce733f..22491ef6ac4 100644 --- a/files/patch.py +++ b/files/patch.py @@ -70,6 +70,12 @@ options: description: - passes --backup --version-control=numbered to patch, producing numbered backup copies + binary: + version_added: "2.0" + description: + - Setting to true will disable patch's heuristic for transforming CRLF + line endings into LF. Line endings of src and dest must match. If set to + False, patch will replace CRLF in src files on POSIX. 
required: false type: "bool" default: "False" @@ -98,10 +104,12 @@ class PatchError(Exception): pass -def is_already_applied(patch_func, patch_file, basedir, dest_file=None, strip=0): +def is_already_applied(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0): opts = ['--quiet', '--reverse', '--forward', '--dry-run', "--strip=%s" % strip, "--directory='%s'" % basedir, "--input='%s'" % patch_file] + if binary: + opts.append('--binary') if dest_file: opts.append("'%s'" % dest_file) @@ -109,12 +117,14 @@ def is_already_applied(patch_func, patch_file, basedir, dest_file=None, strip=0) return rc == 0 -def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False, backup=False): +def apply_patch(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, dry_run=False, backup=False): opts = ['--quiet', '--forward', '--batch', '--reject-file=-', "--strip=%s" % strip, "--directory='%s'" % basedir, "--input='%s'" % patch_file] if dry_run: opts.append('--dry-run') + if binary: + opts.append('--binary') if dest_file: opts.append("'%s'" % dest_file) if backup: @@ -136,7 +146,8 @@ def main(): 'remote_src': {'default': False, 'type': 'bool'}, # NB: for 'backup' parameter, semantics is slightly different from standard # since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~") - 'backup': { 'default': False, 'type': 'bool' } + 'backup': {'default': False, 'type': 'bool'}, + 'binary': {'default': False, 'type': 'bool'}, }, required_one_of=[['dest', 'basedir']], supports_check_mode=True @@ -167,9 +178,9 @@ def main(): p.src = os.path.abspath(p.src) changed = False - if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip): + if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip): try: - apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip, + apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip, dry_run=module.check_mode, backup=p.backup ) changed = True except PatchError, e: From 268104fca321a777e279ed20d252e43da23a2b9a Mon Sep 17 00:00:00 2001 From: Alan Loi Date: Sat, 20 Jun 2015 21:24:36 +1000 Subject: [PATCH 062/157] Added check_mode support to dynamodb_table module. --- cloud/amazon/dynamodb_table | 51 ++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/cloud/amazon/dynamodb_table b/cloud/amazon/dynamodb_table index 7a200a3b271..b59280a2e23 100644 --- a/cloud/amazon/dynamodb_table +++ b/cloud/amazon/dynamodb_table @@ -39,7 +39,7 @@ options: hash_key_name: description: - Name of the hash key. - - Required when state=present. + - Required when C(state=present). 
required: false hash_key_type: description: @@ -109,10 +109,10 @@ try: from boto.dynamodb2.fields import HashKey, RangeKey from boto.dynamodb2.types import STRING, NUMBER, BINARY from boto.exception import BotoServerError, JSONResponseError + HAS_BOTO = True except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) + HAS_BOTO = False DYNAMO_TYPE_MAP = { @@ -132,8 +132,8 @@ def create_or_update_dynamo_table(connection, module): write_capacity = module.params.get('write_capacity') schema = [ - HashKey(hash_key_name, map_dynamo_type(hash_key_type)), - RangeKey(range_key_name, map_dynamo_type(range_key_type)) + HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type)), + RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type)) ] throughput = { 'read': read_capacity, @@ -155,13 +155,14 @@ def create_or_update_dynamo_table(connection, module): table = Table(table_name, connection=connection) if dynamo_table_exists(table): - changed = update_dynamo_table(table, throughput=throughput) + result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode) else: - Table.create(table_name, connection=connection, schema=schema, throughput=throughput) - changed = True + if not module.check_mode: + Table.create(table_name, connection=connection, schema=schema, throughput=throughput) + result['changed'] = True - result['table_status'] = table.describe()['Table']['TableStatus'] - result['changed'] = changed + if not module.check_mode: + result['table_status'] = table.describe()['Table']['TableStatus'] except BotoServerError: result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc() @@ -171,7 +172,7 @@ def create_or_update_dynamo_table(connection, module): def delete_dynamo_table(connection, module): - table_name = module.params.get('table_name') + table_name = module.params.get('name') result = dict( region=module.params.get('region'), @@ -179,14 +180,15 @@ def delete_dynamo_table(connection, module): ) try: - changed = False table = Table(table_name, connection=connection) if dynamo_table_exists(table): - table.delete() - changed = True + if not module.check_mode: + table.delete() + result['changed'] = True - result['changed'] = changed + else: + result['changed'] = False except BotoServerError: result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc() @@ -207,12 +209,14 @@ def dynamo_table_exists(table): raise e -def update_dynamo_table(table, throughput=None): +def update_dynamo_table(table, throughput=None, check_mode=False): table.describe() # populate table details - # AWS complains if the throughput hasn't changed if has_throughput_changed(table, throughput): - return table.update(throughput=throughput) + if not check_mode: + return table.update(throughput=throughput) + else: + return True return False @@ -225,10 +229,6 @@ def has_throughput_changed(table, new_throughput): new_throughput['write'] != table.throughput['write'] -def map_dynamo_type(dynamo_type): - return DYNAMO_TYPE_MAP.get(dynamo_type) - - def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( @@ -242,7 +242,12 @@ def main(): write_capacity=dict(default=1, type='int'), )) - module = AnsibleModule(argument_spec=argument_spec) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module) connection = 
boto.dynamodb2.connect_to_region(region) From 011fef5f3275b5a1cf55a9c578c61d2dde0d3f99 Mon Sep 17 00:00:00 2001 From: Alan Loi Date: Sat, 20 Jun 2015 21:34:27 +1000 Subject: [PATCH 063/157] Added return value documentation to dynamodb_table module. --- cloud/amazon/dynamodb_table | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cloud/amazon/dynamodb_table b/cloud/amazon/dynamodb_table index b59280a2e23..89a7e0fbb2e 100644 --- a/cloud/amazon/dynamodb_table +++ b/cloud/amazon/dynamodb_table @@ -102,6 +102,14 @@ EXAMPLES = ''' state: absent ''' +RETURN = ''' +table_status: + description: The current status of the table. + returned: success + type: string + sample: ACTIVE +''' + try: import boto import boto.dynamodb2 From ac09e609146c3f8c8ef46dc22ab75834aa5d20dc Mon Sep 17 00:00:00 2001 From: Alan Loi Date: Sun, 21 Jun 2015 08:40:57 +1000 Subject: [PATCH 064/157] Add .py file extension to dynamodb_table module. --- cloud/amazon/{dynamodb_table => dynamodb_table.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename cloud/amazon/{dynamodb_table => dynamodb_table.py} (100%) diff --git a/cloud/amazon/dynamodb_table b/cloud/amazon/dynamodb_table.py similarity index 100% rename from cloud/amazon/dynamodb_table rename to cloud/amazon/dynamodb_table.py From 75e1e9fcda109b223487de752356066035059ae7 Mon Sep 17 00:00:00 2001 From: Eike Frost Date: Tue, 28 Apr 2015 19:41:54 +0200 Subject: [PATCH 065/157] add zabbix proxy support to zabbix_host --- monitoring/zabbix_host.py | 49 ++++++++++++++++++++++++++++++--------- 1 file changed, 38 insertions(+), 11 deletions(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 772e92cb32d..6fac82c7177 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -79,6 +79,10 @@ options: description: - The timeout of API request (seconds). default: 10 + proxy: + description: + - The name of the Zabbix Proxy to be used + default: None interfaces: description: - List of interfaces to be created for the host (see example below). 
@@ -118,6 +122,7 @@ EXAMPLES = ''' ip: 10.xx.xx.xx dns: "" port: 12345 + proxy: a.zabbix.proxy ''' import logging @@ -174,21 +179,25 @@ class Host(object): template_ids.append(template_id) return template_ids - def add_host(self, host_name, group_ids, status, interfaces): + def add_host(self, host_name, group_ids, status, interfaces, proxy_id): try: if self._module.check_mode: self._module.exit_json(changed=True) - host_list = self._zapi.host.create({'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}) + parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status} + if proxy_id: + parameters['proxy_hostid'] = proxy_id + host_list = self._zapi.host.create(parameters) if len(host_list) >= 1: return host_list['hostids'][0] except Exception, e: self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e)) - def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list): + def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id): try: if self._module.check_mode: self._module.exit_json(changed=True) - self._zapi.host.update({'hostid': host_id, 'groups': group_ids, 'status': status}) + parameters = {'hostid': host_id, 'groups': group_ids, 'status': status, 'proxy_hostid': proxy_id} + self._zapi.host.update(parameters) interface_list_copy = exist_interface_list if interfaces: for interface in interfaces: @@ -234,6 +243,14 @@ class Host(object): else: return host_list[0] + # get proxyid by proxy name + def get_proxyid_by_proxy_name(self, proxy_name): + proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}}) + if len(proxy_list) < 1: + self._module.fail_json(msg="Proxy not found: %s" % proxy_name) + else: + return proxy_list[0]['proxyid'] + # get group ids by group names def get_group_ids_by_group_names(self, group_names): group_ids = [] @@ -294,7 +311,7 @@ class Host(object): # check all the properties before link or clear template def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids, - exist_interfaces, host): + exist_interfaces, host, proxy_id): # get the existing host's groups exist_host_groups = self.get_host_groups_by_host_id(host_id) if set(host_groups) != set(exist_host_groups): @@ -314,6 +331,9 @@ class Host(object): if set(list(template_ids)) != set(exist_template_ids): return True + if host['proxy_hostid'] != proxy_id: + return True + return False # link or clear template of the host @@ -349,7 +369,8 @@ def main(): status=dict(default="enabled", choices=['enabled', 'disabled']), state=dict(default="present", choices=['present', 'absent']), timeout=dict(type='int', default=10), - interfaces=dict(required=False) + interfaces=dict(required=False), + proxy=dict(required=False) ), supports_check_mode=True ) @@ -367,6 +388,7 @@ def main(): state = module.params['state'] timeout = module.params['timeout'] interfaces = module.params['interfaces'] + proxy = module.params['proxy'] # convert enabled to 0; disabled to 1 status = 1 if status == "disabled" else 0 @@ -396,6 +418,11 @@ def main(): if interface['type'] == 1: ip = interface['ip'] + proxy_id = "0" + + if proxy: + proxy_id = host.get_proxyid_by_proxy_name(proxy) + # check if host exist is_host_exist = host.is_host_exist(host_name) @@ -421,10 +448,10 @@ def main(): if len(exist_interfaces) > interfaces_len: if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids, - exist_interfaces, 
zabbix_host_obj): + exist_interfaces, zabbix_host_obj, proxy_id): host.link_or_clear_template(host_id, template_ids) host.update_host(host_name, group_ids, status, host_id, - interfaces, exist_interfaces) + interfaces, exist_interfaces, proxy_id) module.exit_json(changed=True, result="Successfully update host %s (%s) and linked with template '%s'" % (host_name, ip, link_templates)) @@ -432,8 +459,8 @@ def main(): module.exit_json(changed=False) else: if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids, - exist_interfaces_copy, zabbix_host_obj): - host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces) + exist_interfaces_copy, zabbix_host_obj, proxy_id): + host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id) host.link_or_clear_template(host_id, template_ids) module.exit_json(changed=True, result="Successfully update host %s (%s) and linked with template '%s'" @@ -448,7 +475,7 @@ def main(): module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name) # create host - host_id = host.add_host(host_name, group_ids, status, interfaces) + host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id) host.link_or_clear_template(host_id, template_ids) module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % ( host_name, ip, link_templates)) From 1a914128f6d172da7ea349d6b070758e1ebbff9c Mon Sep 17 00:00:00 2001 From: Alan Loi Date: Mon, 22 Jun 2015 20:23:11 +1000 Subject: [PATCH 066/157] Fix aws connection to use params. --- cloud/amazon/dynamodb_table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/dynamodb_table.py b/cloud/amazon/dynamodb_table.py index 89a7e0fbb2e..130fae44721 100644 --- a/cloud/amazon/dynamodb_table.py +++ b/cloud/amazon/dynamodb_table.py @@ -258,7 +258,7 @@ def main(): module.fail_json(msg='boto required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module) - connection = boto.dynamodb2.connect_to_region(region) + connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params) state = module.params.get('state') if state == 'present': From 92744ef5581d108eba3e17d539fc810de2a36e5f Mon Sep 17 00:00:00 2001 From: Phil Date: Mon, 22 Jun 2015 08:55:41 -0500 Subject: [PATCH 067/157] fixes typo --- windows/win_unzip.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1 index 35a55c811c4..51b092f4665 100644 --- a/windows/win_unzip.ps1 +++ b/windows/win_unzip.ps1 @@ -89,7 +89,7 @@ Else { Fail-Json "PowerShellCommunityExtensions PowerShell Module (PSCX) is required for non-'.zip' compressed archive types." 
} Else { - Set-Attr $result.win_zip "pscx_status" "present" + Set-Attr $result.win_unzip "pscx_status" "present" } # Import From 0ad12cdcf4e5d4ed90b506917ee5083b1910b0e2 Mon Sep 17 00:00:00 2001 From: Gerrit Germis Date: Mon, 22 Jun 2015 20:09:54 +0200 Subject: [PATCH 068/157] specify int parameter types for wait_interval and wait_retries --- network/haproxy.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/network/haproxy.py b/network/haproxy.py index 690aa60bbba..cd17d057b5f 100644 --- a/network/haproxy.py +++ b/network/haproxy.py @@ -78,13 +78,13 @@ options: description: - number of times to check for status after changing the state required: false - default: 20 + default: 25 version_added: "2.0" wait_interval: description: - number of seconds to wait between retries required: false - default: 1 + default: 5 version_added: "2.0" ''' @@ -129,7 +129,7 @@ import time DEFAULT_SOCKET_LOCATION="/var/run/haproxy.sock" RECV_SIZE = 1024 ACTION_CHOICES = ['enabled', 'disabled'] -WAIT_RETRIES=20 +WAIT_RETRIES=25 WAIT_INTERVAL=1 ###################################################################### @@ -302,9 +302,9 @@ def main(): weight=dict(required=False, default=None), socket = dict(required=False, default=DEFAULT_SOCKET_LOCATION), shutdown_sessions=dict(required=False, default=False), - wait=dict(required=False, default=False), - wait_retries=dict(required=False, default=WAIT_RETRIES), - wait_interval=dict(required=False, default=WAIT_INTERVAL), + wait=dict(required=False, default=False, type='bool'), + wait_retries=dict(required=False, default=WAIT_RETRIES, type='int'), + wait_interval=dict(required=False, default=WAIT_INTERVAL, type='int'), ), ) From 2612da50ad637bb469431df699c82b5f68d255e6 Mon Sep 17 00:00:00 2001 From: Gerrit Germis Date: Mon, 22 Jun 2015 20:13:12 +0200 Subject: [PATCH 069/157] wait_interval default value did not match the documented value --- network/haproxy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/haproxy.py b/network/haproxy.py index cd17d057b5f..6d4f6a4279a 100644 --- a/network/haproxy.py +++ b/network/haproxy.py @@ -130,7 +130,7 @@ DEFAULT_SOCKET_LOCATION="/var/run/haproxy.sock" RECV_SIZE = 1024 ACTION_CHOICES = ['enabled', 'disabled'] WAIT_RETRIES=25 -WAIT_INTERVAL=1 +WAIT_INTERVAL=5 ###################################################################### class TimeoutException(Exception): From 03ce40a62ebb1d8ceb8d2f6f7bebb1b4b90458c0 Mon Sep 17 00:00:00 2001 From: Phil Date: Mon, 22 Jun 2015 18:51:58 -0500 Subject: [PATCH 070/157] removes restart functionality, and added creates param for idempotency --- windows/win_unzip.ps1 | 18 ++++++++++-------- windows/win_unzip.py | 39 +++++---------------------------------- 2 files changed, 15 insertions(+), 42 deletions(-) diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1 index 51b092f4665..e4509a290a2 100644 --- a/windows/win_unzip.ps1 +++ b/windows/win_unzip.ps1 @@ -26,6 +26,13 @@ $result = New-Object psobject @{ changed = $false } +If ($params.creates) { + If (Test-Path $params.creates) { + Exit-Json $result "The 'creates' file or directory already exists." + } + +} + If ($params.src) { $src = $params.src.toString() @@ -86,7 +93,7 @@ Else { $list = Get-Module -ListAvailable If (-Not ($list -match "PSCX")) { - Fail-Json "PowerShellCommunityExtensions PowerShell Module (PSCX) is required for non-'.zip' compressed archive types." 
+ Fail-Json $result "PowerShellCommunityExtensions PowerShell Module (PSCX) is required for non-'.zip' compressed archive types." } Else { Set-Attr $result.win_unzip "pscx_status" "present" @@ -122,10 +129,10 @@ Else { } Catch { If ($recurse) { - Fail-Json "Error recursively expanding $src to $dest" + Fail-Json $result "Error recursively expanding $src to $dest" } Else { - Fail-Json "Error expanding $src to $dest" + Fail-Json $result "Error expanding $src to $dest" } } } @@ -135,11 +142,6 @@ If ($rm -eq $true){ Set-Attr $result.win_unzip "rm" "true" } -If ($params.restart -eq "true" -Or $params.restart -eq "yes") { - Restart-Computer -Force - Set-Attr $result.win_unzip "restart" "true" -} - # Fixes a fail error message (when the task actually succeeds) for a "Convert-ToJson: The converted JSON string is in bad format" # This happens when JSON is parsing a string that ends with a "\", which is possible when specifying a directory to download to. # This catches that possible error, before assigning the JSON $result diff --git a/windows/win_unzip.py b/windows/win_unzip.py index 2c3c41df0b7..7c5ac322b97 100644 --- a/windows/win_unzip.py +++ b/windows/win_unzip.py @@ -63,16 +63,11 @@ options: - yes - no aliases: [] - restart: + creates: description: - - Restarts the computer after unzip, can be useful for hotfixes such as http://support.microsoft.com/kb/2842230 (Restarts will have to be accounted for with wait_for module) - choices: - - true - - false - - yes - - no - required: false - default: false + - If this file or directory exists the specified src will not be extracted. + required: no + default: null aliases: [] author: Phil Schwartz ''' @@ -88,6 +83,7 @@ $ ansible -i hosts -m win_unzip -a "src=C:\\LibraryToUnzip.zip dest=C:\\Lib rm=t win_unzip: src: "C:\Users\Phil\Logs.bz2" dest: "C:\Users\Phil\OldLogs" + creates: "C:\Users\Phil\OldLogs" # This playbook example unzips a .zip file and recursively decompresses the contained .gz files and removes all unneeded compressed files after completion. --- @@ -102,31 +98,6 @@ $ ansible -i hosts -m win_unzip -a "src=C:\\LibraryToUnzip.zip dest=C:\\Lib rm=t recurse: yes rm: true -# Install hotfix (self-extracting .exe) ---- -- name: Install WinRM PowerShell Hotfix for Windows Server 2008 SP1 - hosts: all - gather_facts: false - tasks: - - name: Grab Hotfix from URL - win_get_url: - url: 'http://hotfixv4.microsoft.com/Windows%207/Windows%20Server2008%20R2%20SP1/sp2/Fix467402/7600/free/463984_intl_x64_zip.exe' - dest: 'C:\\463984_intl_x64_zip.exe' - - name: Unzip hotfix - win_unzip: - src: "C:\\463984_intl_x64_zip.exe" - dest: "C:\\Hotfix" - recurse: true - restart: true - - name: Wait for server reboot... - local_action: - module: wait_for - host={{ inventory_hostname }} - port={{ansible_ssh_port|default(5986)}} - delay=15 - timeout=600 - state=started - # Install PSCX to use for extracting a gz file - name: Grab PSCX msi win_get_url: From d8063b913ee49f03236c30a3d90b6e106c949f3f Mon Sep 17 00:00:00 2001 From: jpic Date: Tue, 23 Jun 2015 19:36:43 +0200 Subject: [PATCH 071/157] Define HAS_LXC even if import lxc doesn't fail. 
This fixes:: Traceback (most recent call last): File "/home/jpic/.ansible/tmp/ansible-tmp-1435080800.61-38257321141340/lxc_container", line 3353, in main() File "/home/jpic/.ansible/tmp/ansible-tmp-1435080800.61-38257321141340/lxc_container", line 1712, in main if not HAS_LXC: NameError: global name 'HAS_LXC' is not defined --- cloud/lxc/lxc_container.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index e6d70f4e487..2264a86c40c 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -385,6 +385,8 @@ try: import lxc except ImportError: HAS_LXC = False +else: + HAS_LXC = True # LXC_COMPRESSION_MAP is a map of available compression types when creating From c4d24721483af1e347b7408c8d19cf1617a6a91f Mon Sep 17 00:00:00 2001 From: jpic Date: Tue, 23 Jun 2015 19:38:51 +0200 Subject: [PATCH 072/157] Fixed lxc option parsing. This fixes:: Traceback (most recent call last): File "/home/jpic/.ansible/tmp/ansible-tmp-1435080916.98-133068627776311/lxc_container", line 3355, in main() File "/home/jpic/.ansible/tmp/ansible-tmp-1435080916.98-133068627776311/lxc_container", line 1724, in main lxc_manage.run() File "/home/jpic/.ansible/tmp/ansible-tmp-1435080916.98-133068627776311/lxc_container", line 1605, in run action() File "/home/jpic/.ansible/tmp/ansible-tmp-1435080916.98-133068627776311/lxc_container", line 1145, in _started self._config() File "/home/jpic/.ansible/tmp/ansible-tmp-1435080916.98-133068627776311/lxc_container", line 714, in _config _, _value = option_line.split('=') ValueError: too many values to unpack With such a task:: tasks: - lxc_container: name: buildbot-master container_config: - "lxc.mount.entry = {{ cwd }} srv/peopletest none defaults,bind,uid=0,create=dir 0 0" --- cloud/lxc/lxc_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index e6d70f4e487..090d4f73c97 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -708,7 +708,7 @@ class LxcContainerManagement(object): for option_line in container_config: # Look for key in config if option_line.startswith(key): - _, _value = option_line.split('=') + _, _value = option_line.split('=', 1) config_value = ' '.join(_value.split()) line_index = container_config.index(option_line) # If the sanitized values don't match replace them From ebe1904e59aaa9a459c3993bce6a499dc5bd9b73 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 23 Jun 2015 14:12:07 -0500 Subject: [PATCH 073/157] Add missing __init__.py --- cloud/rackspace/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 cloud/rackspace/__init__.py diff --git a/cloud/rackspace/__init__.py b/cloud/rackspace/__init__.py new file mode 100644 index 00000000000..e69de29bb2d From d5d84288ae0abba26cb8f66ae0ef9f2db07f306c Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 23 Jun 2015 14:12:17 -0500 Subject: [PATCH 074/157] Bump version_added to 2.0 --- cloud/rackspace/rax_mon_alarm.py | 2 +- cloud/rackspace/rax_mon_check.py | 2 +- cloud/rackspace/rax_mon_entity.py | 2 +- cloud/rackspace/rax_mon_notification.py | 2 +- cloud/rackspace/rax_mon_notification_plan.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py index f9b97bc8dd1..a3f29e22f50 100644 --- a/cloud/rackspace/rax_mon_alarm.py +++ b/cloud/rackspace/rax_mon_alarm.py @@ -27,7 +27,7 @@ description: notifications. 
Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> *rax_mon_alarm* -version_added: "1.9" +version_added: "2.0" options: state: description: diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py index 101efd3c858..14b86864e2f 100644 --- a/cloud/rackspace/rax_mon_check.py +++ b/cloud/rackspace/rax_mon_check.py @@ -28,7 +28,7 @@ description: monitor. Rackspace monitoring module flow | rax_mon_entity -> *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm -version_added: "1.9" +version_added: "2.0" options: state: description: diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py index 5f82ff9c524..f5f142d2165 100644 --- a/cloud/rackspace/rax_mon_entity.py +++ b/cloud/rackspace/rax_mon_entity.py @@ -26,7 +26,7 @@ description: provide a convenient, centralized place to store IP addresses. Rackspace monitoring module flow | *rax_mon_entity* -> rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm -version_added: "1.9" +version_added: "2.0" options: label: description: diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py index 8a21b088c5e..d7b6692dc2c 100644 --- a/cloud/rackspace/rax_mon_notification.py +++ b/cloud/rackspace/rax_mon_notification.py @@ -25,7 +25,7 @@ description: channel that can be used to communicate alarms, such as email, webhooks, or PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm -version_added: "1.9" +version_added: "2.0" options: state: description: diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py index 05b89b2cfb3..5bb3fa1652a 100644 --- a/cloud/rackspace/rax_mon_notification_plan.py +++ b/cloud/rackspace/rax_mon_notification_plan.py @@ -26,7 +26,7 @@ description: associating existing rax_mon_notifications with severity levels. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm -version_added: "1.9" +version_added: "2.0" options: state: description: From f1e3260b3f97e37ae70788b42f089dd53f591b99 Mon Sep 17 00:00:00 2001 From: Arnaud Dematte Date: Tue, 21 Apr 2015 14:48:44 +0200 Subject: [PATCH 075/157] Update mail.py to allow html content Adding parameter subtype to allow html based content. The default behavior of text/plain has been preserved. --- notification/mail.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/notification/mail.py b/notification/mail.py index c42e80fdabf..52869460862 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -110,6 +110,11 @@ options: - The character set of email being sent default: 'us-ascii' required: false + subtype: + description: + - The minor mime type, can be either text or html. The major type is always text. 
+ default: 'plain' + required: false """ EXAMPLES = ''' @@ -183,7 +188,8 @@ def main(): body = dict(default=None), attach = dict(default=None), headers = dict(default=None), - charset = dict(default='us-ascii') + charset = dict(default='us-ascii'), + subtype = dict(default='plain') ) ) @@ -200,6 +206,7 @@ def main(): attach_files = module.params.get('attach') headers = module.params.get('headers') charset = module.params.get('charset') + subtype = module.params.get('subtype') sender_phrase, sender_addr = parseaddr(sender) if not body: @@ -259,7 +266,7 @@ def main(): if len(cc_list) > 0: msg['Cc'] = ", ".join(cc_list) - part = MIMEText(body + "\n\n", _charset=charset) + part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset) msg.attach(part) if attach_files is not None: From 955bf92ff892a7359a045e1ddb3b29b7809a230b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 06:53:28 -0700 Subject: [PATCH 076/157] Add version_added to the subtype parameter --- notification/mail.py | 1 + 1 file changed, 1 insertion(+) diff --git a/notification/mail.py b/notification/mail.py index 52869460862..8be9a589cbf 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -115,6 +115,7 @@ options: - The minor mime type, can be either text or html. The major type is always text. default: 'plain' required: false + version_added: "2.0" """ EXAMPLES = ''' From 9183170a4a0e8d1ccfdf8c3535ad3b28ca25b22c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 07:05:29 -0700 Subject: [PATCH 077/157] These modules were added to version 2.0, not 1.9 --- windows/win_iis_virtualdirectory.py | 2 +- windows/win_iis_webapplication.py | 2 +- windows/win_iis_webapppool.py | 2 +- windows/win_iis_webbinding.py | 2 +- windows/win_iis_website.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/windows/win_iis_virtualdirectory.py b/windows/win_iis_virtualdirectory.py index bbedfbbb4ab..c8a5dd1dcc8 100644 --- a/windows/win_iis_virtualdirectory.py +++ b/windows/win_iis_virtualdirectory.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: win_iis_virtualdirectory -version_added: "1.9" +version_added: "2.0" short_description: Configures a IIS virtual directories. description: - Creates, Removes and configures a IIS Web site diff --git a/windows/win_iis_webapplication.py b/windows/win_iis_webapplication.py index d8a59b66054..11a338e71e0 100644 --- a/windows/win_iis_webapplication.py +++ b/windows/win_iis_webapplication.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: win_iis_website -version_added: "1.9" +version_added: "2.0" short_description: Configures a IIS Web application. description: - Creates, Removes and configures a IIS Web applications diff --git a/windows/win_iis_webapppool.py b/windows/win_iis_webapppool.py index 320fe07f637..c77c3b04cb7 100644 --- a/windows/win_iis_webapppool.py +++ b/windows/win_iis_webapppool.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: win_iis_webapppool -version_added: "1.9" +version_added: "2.0" short_description: Configures a IIS Web Application Pool. description: - Creates, Removes and configures a IIS Web Application Pool diff --git a/windows/win_iis_webbinding.py b/windows/win_iis_webbinding.py index 0cc5da158bf..061bed73723 100644 --- a/windows/win_iis_webbinding.py +++ b/windows/win_iis_webbinding.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: win_iis_webbinding -version_added: "1.9" +version_added: "2.0" short_description: Configures a IIS Web site. 
description: - Creates, Removes and configures a binding to an existing IIS Web site diff --git a/windows/win_iis_website.py b/windows/win_iis_website.py index 0893b11c2bd..8921afe5970 100644 --- a/windows/win_iis_website.py +++ b/windows/win_iis_website.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: win_iis_website -version_added: "1.9" +version_added: "2.0" short_description: Configures a IIS Web site. description: - Creates, Removes and configures a IIS Web site From dec7d95d514ca89c2784b63d836dd6fb872bdd9c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 07:12:10 -0700 Subject: [PATCH 078/157] Fix up docs --- cloud/amazon/dynamodb_table.py | 1 + windows/win_iis_virtualdirectory.py | 4 ++-- windows/win_iis_webapplication.py | 14 +++++++------- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/cloud/amazon/dynamodb_table.py b/cloud/amazon/dynamodb_table.py index 130fae44721..94d1f4616bb 100644 --- a/cloud/amazon/dynamodb_table.py +++ b/cloud/amazon/dynamodb_table.py @@ -23,6 +23,7 @@ description: - Can update the provisioned throughput on existing tables. - Returns the status of the specified table. author: Alan Loi (@loia) +version_added: "2.0" requirements: - "boto >= 2.13.2" options: diff --git a/windows/win_iis_virtualdirectory.py b/windows/win_iis_virtualdirectory.py index c8a5dd1dcc8..e5bbd950007 100644 --- a/windows/win_iis_virtualdirectory.py +++ b/windows/win_iis_virtualdirectory.py @@ -28,13 +28,13 @@ description: options: name: description: - - The name of the virtual directory to create. + - The name of the virtual directory to create or remove required: true default: null aliases: [] state: description: - - + - Whether to add or remove the specified virtual directory choices: - absent - present diff --git a/windows/win_iis_webapplication.py b/windows/win_iis_webapplication.py index 11a338e71e0..b8ebd085162 100644 --- a/windows/win_iis_webapplication.py +++ b/windows/win_iis_webapplication.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -module: win_iis_website +module: win_iis_webapplication version_added: "2.0" short_description: Configures a IIS Web application. description: @@ -32,12 +32,12 @@ options: required: true default: null aliases: [] - site: - description: - - Name of the site on which the application is created. - required: true - default: null - aliases: [] + site: + description: + - Name of the site on which the application is created. 
+ required: true + default: null + aliases: [] state: description: - State of the web application From 60b5ae35b30d4c2a2b2d337ac413864d6df8251a Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 26 Jun 2015 14:23:35 +0200 Subject: [PATCH 079/157] cloudstack: make get_template_or_iso returning a dict for fix GH-646 --- cloud/cloudstack/cs_instance.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index a93a524383a..7cf4426267e 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -355,6 +355,8 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): def __init__(self, module): AnsibleCloudStack.__init__(self, module) self.instance = None + self.template = None + self.iso = None def get_service_offering_id(self): @@ -371,7 +373,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): self.module.fail_json(msg="Service offering '%s' not found" % service_offering) - def get_template_or_iso_id(self): + def get_template_or_iso(self, key=None): template = self.module.params.get('template') iso = self.module.params.get('iso') @@ -388,21 +390,28 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args['zoneid'] = self.get_zone('id') if template: + if self.template: + return self._get_by_key(key, self.template) + args['templatefilter'] = 'executable' templates = self.cs.listTemplates(**args) if templates: for t in templates['template']: if template in [ t['displaytext'], t['name'], t['id'] ]: - return t['id'] + self.template = t + return self._get_by_key(key, self.template) self.module.fail_json(msg="Template '%s' not found" % template) elif iso: + if self.iso: + return self._get_by_key(key, self.iso) args['isofilter'] = 'executable' isos = self.cs.listIsos(**args) if isos: for i in isos['iso']: if iso in [ i['displaytext'], i['name'], i['id'] ]: - return i['id'] + self.iso = i + return self._get_by_key(key, self.iso) self.module.fail_json(msg="ISO '%s' not found" % iso) @@ -503,7 +512,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): self.result['changed'] = True args = {} - args['templateid'] = self.get_template_or_iso_id() + args['templateid'] = self.get_template_or_iso(key='id') args['zoneid'] = self.get_zone('id') args['serviceofferingid'] = self.get_service_offering_id() args['account'] = self.get_account('name') From b1e6d6ba52c7aaa5f2ab1c73e642d774ad88986c Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 26 Jun 2015 14:52:31 +0200 Subject: [PATCH 080/157] cloudstack: fix cs_instance hypervisor must be omitted if set on template/iso Fix related to issue reported in PR GH-646 --- cloud/cloudstack/cs_instance.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 7cf4426267e..0d156390e83 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -70,8 +70,8 @@ options: hypervisor: description: - Name the hypervisor to be used for creating the new instance. - - Relevant when using C(state=present) and option C(ISO) is used. - - If not set, first found hypervisor will be used. + - Relevant when using C(state=present), but only considered if not set on ISO/template. + - If not set or found on ISO/template, first found hypervisor will be used. 
required: false default: null choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] @@ -520,7 +520,6 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args['projectid'] = self.get_project('id') args['diskofferingid'] = self.get_disk_offering_id() args['networkids'] = self.get_network_ids() - args['hypervisor'] = self.get_hypervisor() args['userdata'] = self.get_user_data() args['keyboard'] = self.module.params.get('keyboard') args['ipaddress'] = self.module.params.get('ip_address') @@ -532,6 +531,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args['securitygroupnames'] = ','.join(self.module.params.get('security_groups')) args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups')) + template_iso = self.get_template_or_iso() + if 'hypervisor' not in template_iso: + args['hypervisor'] = self.get_hypervisor() + instance = None if not self.module.check_mode: instance = self.cs.deployVirtualMachine(**args) From 12d76027df51d96f584972b9cae9699395d3de87 Mon Sep 17 00:00:00 2001 From: Rick Mendes Date: Fri, 26 Jun 2015 17:00:58 -0700 Subject: [PATCH 081/157] upgraded docs and argspec to match module guidelines --- cloud/amazon/ec2_win_password.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/ec2_win_password.py b/cloud/amazon/ec2_win_password.py index 05aa67e3d29..d555ce625d3 100644 --- a/cloud/amazon/ec2_win_password.py +++ b/cloud/amazon/ec2_win_password.py @@ -7,7 +7,7 @@ short_description: gets the default administrator password for ec2 windows insta description: - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto. version_added: "2.0" -author: Rick Mendes +author: Rick Mendes(@rickmendes) options: instance_id: description: @@ -22,6 +22,7 @@ options: description: - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3. required: false + default: null region: description: - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. @@ -39,6 +40,7 @@ options: version_added: "2.0" description: - Number of seconds to wait before giving up. 
+ required: false default: 120 extends_documentation_fragment: aws @@ -93,9 +95,9 @@ def main(): argument_spec.update(dict( instance_id = dict(required=True), key_file = dict(required=True), - key_passphrase = dict(default=None), - wait = dict(type='bool', default=False), - wait_timeout = dict(default=120), + key_passphrase = dict(no_log=True, default=None, required=False), + wait = dict(type='bool', default=False, required=False), + wait_timeout = dict(default=120, required=False), ) ) module = AnsibleModule(argument_spec=argument_spec) From 9a1918c62875fde93267213631fc8852a704f31e Mon Sep 17 00:00:00 2001 From: Tim Hoiberg Date: Wed, 13 May 2015 19:40:50 +1000 Subject: [PATCH 082/157] Adding module to manage Ruby Gem dependencies via Bundler --- packaging/language/bundler.py | 199 ++++++++++++++++++++++++++++++++++ 1 file changed, 199 insertions(+) create mode 100644 packaging/language/bundler.py diff --git a/packaging/language/bundler.py b/packaging/language/bundler.py new file mode 100644 index 00000000000..877d09dbea5 --- /dev/null +++ b/packaging/language/bundler.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Tim Hoiberg +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION=''' +--- +module: bundler +short_description: Manage Ruby Gem dependencies with Bundler +description: + - Manage installation and Gem version dependencies for Ruby using the Bundler gem +version_added: "2.0.0" +options: + executable: + description: + - The path to the bundler executable + required: false + default: null + state: + description: + - The desired state of the Gem bundle. C(latest) updates gems to the most recent, acceptable version + required: false + choices: [present, latest] + default: present + chdir: + description: + - The directory to execute the bundler commands from. This directoy needs to contain a valid Gemfile or .bundle/ directory + required: false + default: temporary working directory + exclude_groups: + description: + - A list of Gemfile groups to exclude during operations. This only applies when state is C(present). Bundler considers this a 'remembered' + property for the Gemfile and will automatically exclude groups in future operations even if C(exclude_groups) is not set + required: false + default: null + clean: + description: + - Only applies if state is C(present). If set removes any gems on the target host that are not in the gemfile + required: false + choices: [yes, no] + default: "no" + gemfile: + description: + - Only applies if state is C(present). The path to the gemfile to use to install gems. + required: false + default: Gemfile in current directory + local: + description: + - If set only installs gems from the cache on the target host + required: false + choices: [yes, no] + default: "no" + deployment_mode: + description: + - Only applies if state is C(present). If set it will only install gems that are in the default or production groups. 
Requires a Gemfile.lock + file to have been created prior + required: false + choices: [yes, no] + default: "no" + user_install: + description: + - Only applies if state is C(present). Installs gems in the local user's cache or for all users + required: false + choices: [yes, no] + default: "yes" + gem_path: + description: + - Only applies if state is C(present). Specifies the directory to install the gems into. If C(chdir) is set then this path is relative to C(chdir) + required: false + default: RubyGems gem paths + binstub_directory: + description: + - Only applies if state is C(present). Specifies the directory to install any gem bins files to. When executed the bin files will run within + the context of the Gemfile and fail if any required gem dependencies are not installed. If C(chdir) is set then this path is relative to C(chdir) + required: false + default: null + extra_args: + description: + - A space separated string of additional commands that can be applied to the Bundler command. Refer to the Bundler documentation for more + information + required: false + default: null +author: Tim Hoiberg +''' + +EXAMPLES=''' +# Installs gems from a Gemfile in the current directory +- bundler: state=present executable=~/.rvm/gems/2.1.5/bin/bundle + +# Excludes the production group from installing +- bundler: state=present exclude_groups=production + +# Only install gems from the default and production groups +- bundler: state=present deployment=yes + +# Installs gems using a Gemfile in another directory +- bunlder: state=present gemfile=../rails_project/Gemfile + +# Updates Gemfile in another directory +- bundler: state=latest chdir=~/rails_project +''' + + +def get_bundler_executable(module): + if module.params.get('executable'): + return module.params.get('executable').split(' ') + else: + return [ module.get_bin_path('bundle', True) ] + + +def main(): + module = AnsibleModule( + argument_spec=dict( + executable=dict(default=None, required=False), + state=dict(default='present', required=False, choices=['present', 'latest']), + chdir=dict(default=None, required=False), + exclude_groups=dict(default=None, required=False, type='list'), + clean=dict(default=False, required=False, type='bool'), + gemfile=dict(default=None, required=False), + local=dict(default=False, required=False, type='bool'), + deployment_mode=dict(default=False, required=False, type='bool'), + user_install=dict(default=True, required=False, type='bool'), + gem_path=dict(default=None, required=False), + binstub_directory=dict(default=None, required=False), + extra_args=dict(default=None, required=False), + ), + supports_check_mode=True + ) + + executable = module.params.get('executable') + state = module.params.get('state') + chdir = module.params.get('chdir') + exclude_groups = module.params.get('exclude_groups') + clean = module.params.get('clean') + gemfile = module.params.get('gemfile') + local = module.params.get('local') + deployment_mode = module.params.get('deployment_mode') + user_install = module.params.get('user_install') + gem_path = module.params.get('gem_install_path') + binstub_directory = module.params.get('binstub_directory') + extra_args = module.params.get('extra_args') + + cmd = get_bundler_executable(module) + + if module.check_mode: + cmd.append('check') + rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False) + + module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err) + + if state == 'present': + cmd.append('install') + if exclude_groups: + cmd.extend(['--without', 
':'.join(exclude_groups)]) + if clean: + cmd.append('--clean') + if gemfile: + cmd.extend(['--gemfile', gemfile]) + if local: + cmd.append('--local') + if deployment_mode: + cmd.append('--deployment') + if not user_install: + cmd.append('--system') + if gem_path: + cmd.extend(['--path', gem_path]) + if binstub_directory: + cmd.extend(['--binstubs', binstub_directory]) + else: + cmd.append('update') + if local: + cmd.append('--local') + + if extra_args: + cmd.extend(extra_args.split(' ')) + + rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True) + + module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err) + + +from ansible.module_utils.basic import * +main() \ No newline at end of file From 1d48c9658a6c539f6a82f6b857342cc20a321597 Mon Sep 17 00:00:00 2001 From: Tim Hoiberg Date: Sat, 27 Jun 2015 15:50:30 +1000 Subject: [PATCH 083/157] Fixing typo --- packaging/language/bundler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/bundler.py b/packaging/language/bundler.py index 877d09dbea5..82ef2838a9a 100644 --- a/packaging/language/bundler.py +++ b/packaging/language/bundler.py @@ -110,7 +110,7 @@ EXAMPLES=''' - bundler: state=present deployment=yes # Installs gems using a Gemfile in another directory -- bunlder: state=present gemfile=../rails_project/Gemfile +- bundler: state=present gemfile=../rails_project/Gemfile # Updates Gemfile in another directory - bundler: state=latest chdir=~/rails_project From b031e818b1e3c26bcd5050d2ccf15e614511cd46 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 26 Jun 2015 09:39:53 +0200 Subject: [PATCH 084/157] cloudstack: fix cs_instance can not find iso and disk offering if domain is set. This does only affect root admins. --- cloud/cloudstack/cs_instance.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 0d156390e83..3023498db39 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -388,6 +388,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args['domainid'] = self.get_domain('id') args['projectid'] = self.get_project('id') args['zoneid'] = self.get_zone('id') + args['isrecursive'] = True if template: if self.template: @@ -421,10 +422,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): if not disk_offering: return None - args = {} - args['domainid'] = self.get_domain('id') - - disk_offerings = self.cs.listDiskOfferings(**args) + disk_offerings = self.cs.listDiskOfferings() if disk_offerings: for d in disk_offerings['diskoffering']: if disk_offering in [ d['displaytext'], d['name'], d['id'] ]: From c8d6d68428949e66f8b726fe6094f7794f2e9ec4 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 26 Jun 2015 09:42:18 +0200 Subject: [PATCH 085/157] cloudstack: cleanup cs_instance use param key exlicitly for utils methods --- cloud/cloudstack/cs_instance.py | 36 ++++++++++++++++----------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 3023498db39..f53c0079821 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -384,10 +384,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): self.module.fail_json(msg="Template are ISO are mutually exclusive.") args = {} - args['account'] = self.get_account('name') - args['domainid'] = self.get_domain('id') - args['projectid'] = self.get_project('id') - args['zoneid'] = 
self.get_zone('id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['zoneid'] = self.get_zone(key='id') args['isrecursive'] = True if template: @@ -436,10 +436,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): instance_name = self.module.params.get('name') args = {} - args['account'] = self.get_account('name') - args['domainid'] = self.get_domain('id') - args['projectid'] = self.get_project('id') - args['zoneid'] = self.get_zone('id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['zoneid'] = self.get_zone(key='id') instances = self.cs.listVirtualMachines(**args) if instances: @@ -456,10 +456,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): return None args = {} - args['account'] = self.get_account('name') - args['domainid'] = self.get_domain('id') - args['projectid'] = self.get_project('id') - args['zoneid'] = self.get_zone('id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['zoneid'] = self.get_zone(key='id') networks = self.cs.listNetworks(**args) if not networks: @@ -511,11 +511,11 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args = {} args['templateid'] = self.get_template_or_iso(key='id') - args['zoneid'] = self.get_zone('id') + args['zoneid'] = self.get_zone(key='id') args['serviceofferingid'] = self.get_service_offering_id() - args['account'] = self.get_account('name') - args['domainid'] = self.get_domain('id') - args['projectid'] = self.get_project('id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') args['diskofferingid'] = self.get_disk_offering_id() args['networkids'] = self.get_network_ids() args['userdata'] = self.get_user_data() @@ -556,12 +556,12 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args_instance_update['group'] = self.module.params.get('group') args_instance_update['displayname'] = self.get_display_name() args_instance_update['userdata'] = self.get_user_data() - args_instance_update['ostypeid'] = self.get_os_type('id') + args_instance_update['ostypeid'] = self.get_os_type(key='id') args_ssh_key = {} args_ssh_key['id'] = instance['id'] args_ssh_key['keypair'] = self.module.params.get('ssh_key') - args_ssh_key['projectid'] = self.get_project('id') + args_ssh_key['projectid'] = self.get_project(key='id') if self._has_changed(args_service_offering, instance) or \ self._has_changed(args_instance_update, instance) or \ From 5b86a15cdb960c42a71253e1036e35e1f2eb9977 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 26 Jun 2015 10:04:19 +0200 Subject: [PATCH 086/157] cloudstack: cs_instance do not pass zoneid to listVirtualMachines This change is related to 2 issues; - The API does not return destroyed VMs if zone ID is passed for CS version < 4.5.2. Also see CLOUDSTACK-8578. This only affects domain and root admins. - The instance name must be unique across all zones. If we pass the zone ID to find a VM, it will not be found if it is in a different zone but a deployment with the name would fail. 
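To make the second point concrete, the lookup the patch below moves to can be sketched as a small standalone helper. The client call (listVirtualMachines) and the idea of matching purely by name with no zone filter come from the module code; the helper name, its argument handling, and the exact set of matched fields (name/displayname/id) are illustrative assumptions, not the module's literal implementation.

def find_instance_by_name(cs, name, account=None, domainid=None, projectid=None):
    # No zoneid on purpose: instance names are unique across zones, and
    # CloudStack < 4.5.2 omits destroyed VMs from zone-filtered listings.
    args = dict((k, v) for k, v in [('account', account),
                                    ('domainid', domainid),
                                    ('projectid', projectid)] if v is not None)
    res = cs.listVirtualMachines(**args) or {}
    for vm in res.get('virtualmachine', []):
        if name in (vm.get('name'), vm.get('displayname'), vm.get('id')):
            return vm
    return None
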
--- cloud/cloudstack/cs_instance.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index f53c0079821..9aa9bb89651 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -439,8 +439,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args['account'] = self.get_account(key='name') args['domainid'] = self.get_domain(key='id') args['projectid'] = self.get_project(key='id') - args['zoneid'] = self.get_zone(key='id') - + # Do not pass zoneid, as the instance name must be unique across zones. instances = self.cs.listVirtualMachines(**args) if instances: for v in instances['virtualmachine']: From 94060b5adeb01b5b88580f0cec7c4a9ea5bff117 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 26 Jun 2015 11:56:28 +0200 Subject: [PATCH 087/157] cloudstack: fix state=expunged in cs_instance --- cloud/cloudstack/cs_instance.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 9aa9bb89651..f6518b85e52 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -633,7 +633,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): if instance['state'].lower() in [ 'destroying', 'destroyed' ]: self.result['changed'] = True if not self.module.check_mode: - res = self.cs.expungeVirtualMachine(id=instance['id']) + res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True) elif instance['state'].lower() not in [ 'expunging' ]: self.result['changed'] = True @@ -645,7 +645,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): poll_async = self.module.params.get('poll_async') if poll_async: - instance = self._poll_job(res, 'virtualmachine') + res = self._poll_job(res, 'virtualmachine') return instance From db33fcf89a1f2c71c33b745810b0823887780e23 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 28 Jun 2015 12:52:05 +0200 Subject: [PATCH 088/157] cloudstack: update code to match best practice * Remove catchall exception * use `if __name__ == '__main__':` --- cloud/cloudstack/cs_account.py | 6 ++---- cloud/cloudstack/cs_affinitygroup.py | 6 ++---- cloud/cloudstack/cs_firewall.py | 6 ++---- cloud/cloudstack/cs_instance.py | 6 ++---- cloud/cloudstack/cs_instancegroup.py | 6 ++---- cloud/cloudstack/cs_iso.py | 6 ++---- cloud/cloudstack/cs_network.py | 6 ++---- cloud/cloudstack/cs_portforward.py | 6 ++---- cloud/cloudstack/cs_project.py | 6 ++---- cloud/cloudstack/cs_securitygroup.py | 6 ++---- cloud/cloudstack/cs_securitygroup_rule.py | 6 ++---- cloud/cloudstack/cs_sshkeypair.py | 6 ++---- cloud/cloudstack/cs_template.py | 6 ++---- cloud/cloudstack/cs_vmsnapshot.py | 6 ++---- 14 files changed, 28 insertions(+), 56 deletions(-) diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index cc487af5e51..d1302854454 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -400,11 +400,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 580cc5d7e8d..cfd76816e1b 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -246,11 +246,9 @@ 
def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 96b3f20f7cf..97cf97e781e 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -451,11 +451,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index f6518b85e52..79b1c58a586 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -852,11 +852,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index 478748aeec3..7280ceff5ea 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -223,11 +223,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index e3ba322f6ba..67e4b283155 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -354,11 +354,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_network.py b/cloud/cloudstack/cs_network.py index b602b345677..50dd2981e72 100644 --- a/cloud/cloudstack/cs_network.py +++ b/cloud/cloudstack/cs_network.py @@ -627,11 +627,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 3b88ca85723..df95bfd3ea6 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -427,11 +427,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py 
index 0f391bc5005..f09c42f5899 100644 --- a/cloud/cloudstack/cs_project.py +++ b/cloud/cloudstack/cs_project.py @@ -332,11 +332,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 54a71686a6e..a6827f6f811 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -190,11 +190,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index e943e7d11c2..0780e12d70d 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -429,11 +429,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 180e96ca6ae..28c6b3802b4 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -249,11 +249,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_template.py b/cloud/cloudstack/cs_template.py index 1cd245d2b5c..8e56aafaa7e 100644 --- a/cloud/cloudstack/cs_template.py +++ b/cloud/cloudstack/cs_template.py @@ -623,11 +623,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index 24e8a46fa37..62dec7ca35d 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -317,11 +317,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) - except Exception, e: - module.fail_json(msg='Exception: %s' % str(e)) - module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From 0dc2fb73d3b2d0715695913ffa28ee81bea6eb3b Mon Sep 17 00:00:00 2001 From: Christopher Troup Date: Sun, 28 Jun 2015 11:09:44 -0400 Subject: [PATCH 089/157] Add GPL file header --- cloud/amazon/route53_zone.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cloud/amazon/route53_zone.py b/cloud/amazon/route53_zone.py index 2383563fafe..07a049b14f7 100644 --- 
a/cloud/amazon/route53_zone.py +++ b/cloud/amazon/route53_zone.py @@ -1,4 +1,18 @@ #!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . DOCUMENTATION = ''' module: route53_zone From c7d554677736566d8aace3632e84c04ba744bbd9 Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Mon, 29 Jun 2015 09:27:44 +0200 Subject: [PATCH 090/157] openbsd_pkg: Update author mail address. --- packaging/os/openbsd_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/openbsd_pkg.py b/packaging/os/openbsd_pkg.py index 1b5d0bb06b2..1f331261d98 100644 --- a/packaging/os/openbsd_pkg.py +++ b/packaging/os/openbsd_pkg.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2013, Patrik Lundin +# (c) 2013, Patrik Lundin # # This file is part of Ansible # From 90d0828b1a6a40ac60b028a4a0c3cec50014692f Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 29 Jun 2015 11:58:56 +0200 Subject: [PATCH 091/157] cloudstack: fix user_data gathering, must not be in for loop --- cloud/cloudstack/cs_facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_facts.py b/cloud/cloudstack/cs_facts.py index f8749834120..e2bebf8b116 100644 --- a/cloud/cloudstack/cs_facts.py +++ b/cloud/cloudstack/cs_facts.py @@ -130,7 +130,7 @@ class CloudStackFacts(object): if not filter: for key,path in self.fact_paths.iteritems(): result[key] = self._fetch(CS_METADATA_BASE_URL + "/" + path) - result['cloudstack_user_data'] = self._get_user_data_json() + result['cloudstack_user_data'] = self._get_user_data_json() else: if filter == 'cloudstack_user_data': result['cloudstack_user_data'] = self._get_user_data_json() From 9e8802cacd26617efbab32f26505158a6e2d64fc Mon Sep 17 00:00:00 2001 From: Alan Loi Date: Mon, 29 Jun 2015 20:45:53 +1000 Subject: [PATCH 092/157] Docfixes - add version_added and default values. --- cloud/amazon/dynamodb_table.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cloud/amazon/dynamodb_table.py b/cloud/amazon/dynamodb_table.py index 130fae44721..f3ba7d7e77c 100644 --- a/cloud/amazon/dynamodb_table.py +++ b/cloud/amazon/dynamodb_table.py @@ -22,6 +22,7 @@ description: - Create or delete AWS Dynamo DB tables. - Can update the provisioned throughput on existing tables. - Returns the status of the specified table. +version_added: "2.0" author: Alan Loi (@loia) requirements: - "boto >= 2.13.2" @@ -41,6 +42,7 @@ options: - Name of the hash key. - Required when C(state=present). required: false + default: null hash_key_type: description: - Type of the hash key. @@ -51,6 +53,7 @@ options: description: - Name of the range key. required: false + default: null range_key_type: description: - Type of the range key. From c7f0fafe62c4cb08762dbffa2dbe01921123549b Mon Sep 17 00:00:00 2001 From: Alan Loi Date: Mon, 29 Jun 2015 20:55:33 +1000 Subject: [PATCH 093/157] Check AWS region and credentials are provided. 
--- cloud/amazon/dynamodb_table.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/dynamodb_table.py b/cloud/amazon/dynamodb_table.py index f3ba7d7e77c..4b29cfbfaa9 100644 --- a/cloud/amazon/dynamodb_table.py +++ b/cloud/amazon/dynamodb_table.py @@ -119,7 +119,7 @@ try: from boto.dynamodb2.table import Table from boto.dynamodb2.fields import HashKey, RangeKey from boto.dynamodb2.types import STRING, NUMBER, BINARY - from boto.exception import BotoServerError, JSONResponseError + from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError HAS_BOTO = True except ImportError: @@ -261,7 +261,14 @@ def main(): module.fail_json(msg='boto required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module) - connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params) + if not region: + module.fail_json(msg='region must be specified') + + try: + connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params) + + except (NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) state = module.params.get('state') if state == 'present': @@ -274,4 +281,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() From 86fda85ba38f45d9f274fb0af73d3f291a6e5be3 Mon Sep 17 00:00:00 2001 From: Timothy Vandenbrande Date: Mon, 29 Jun 2015 14:18:09 +0200 Subject: [PATCH 094/157] updated version added for source into the docs --- system/firewalld.py | 1 + 1 file changed, 1 insertion(+) diff --git a/system/firewalld.py b/system/firewalld.py index 0348c6ecb47..677ced8aa78 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -46,6 +46,7 @@ options: - 'The source/network you would like to add/remove to/from firewalld' required: false default: null + version_added: "2.0" zone: description: - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).' From f14317f7f54e7cc873f284c1ea82927b6bd45820 Mon Sep 17 00:00:00 2001 From: tylerturk Date: Mon, 29 Jun 2015 07:51:58 -0500 Subject: [PATCH 095/157] Fix documentation bug --- system/gluster_volume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 7719006502d..ff1ce9831db 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -115,7 +115,7 @@ EXAMPLES = """ gluster_volume: state=present name=test1 options='{performance.cache-size: 256MB}' - name: start gluster volume - gluster_volume: status=started name=test1 + gluster_volume: state=started name=test1 - name: limit usage gluster_volume: state=present name=test1 directory=/foo quota=20.0MB From 57e7a6662a3d5cca7ebed01539b9730941ef6a4b Mon Sep 17 00:00:00 2001 From: Dag Wieers Date: Mon, 29 Jun 2015 17:08:48 +0200 Subject: [PATCH 096/157] Work around a software bug in vSphere Due to a software bug in vSphere, it fails to handle ampersand in datacenter names. The solution is to do what vSphere does (when browsing) and double-encode ampersands. It is likely other characters need special treatment like this as well, haven't found any. 
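The effect of the double-encoding is easiest to see in isolation. A minimal Python 2 sketch (the datacenter name is made up, and quote_plus stands in for whatever URL-encoding the module applies when it builds the request URL):

import urllib

datacenter = 'DC & Lab'                      # hypothetical name containing an ampersand
datacenter = datacenter.replace('&', '%26')  # first pass, as in the patch below
print urllib.quote_plus(datacenter)          # second pass -> 'DC+%2526+Lab', the double-encoded form vSphere accepts
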
--- cloud/vmware/vsphere_copy.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cloud/vmware/vsphere_copy.py b/cloud/vmware/vsphere_copy.py index 7c044a7d51a..44e20caebdf 100644 --- a/cloud/vmware/vsphere_copy.py +++ b/cloud/vmware/vsphere_copy.py @@ -78,6 +78,9 @@ import socket def vmware_path(datastore, datacenter, path): ''' Constructs a URL path that VSphere accepts reliably ''' path = "/folder/%s" % path.lstrip("/") + # Due to a software bug in vSphere, it fails to handle ampersand in datacenter names + # The solution is to do what vSphere does (when browsing) and double-encode ampersands, maybe others ? + datacenter = datacenter.replace('&', '%26') if not path.startswith("/"): path = "/" + path params = dict( dsName = datastore ) From 86d5ca411c2e8d770515b544602c378a39ac7471 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Jun 2015 13:09:11 -0700 Subject: [PATCH 097/157] Add testing documentation to travis --- .travis.yml | 1 + test-docs.sh | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100755 test-docs.sh diff --git a/.travis.yml b/.travis.yml index 84ec3a0983a..d43c6b3b3fa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,3 +13,4 @@ script: - python2.4 -m compileall -fq -x 'cloud/|monitoring/zabbix.*\.py|/layman\.py|/maven_artifact\.py|clustering/consul.*\.py|notification/pushbullet\.py' . - python2.6 -m compileall -fq . - python2.7 -m compileall -fq . + - ./test-docs.sh extras diff --git a/test-docs.sh b/test-docs.sh new file mode 100755 index 00000000000..76297fbada6 --- /dev/null +++ b/test-docs.sh @@ -0,0 +1,21 @@ +#!/bin/sh +set -x + +CHECKOUT_DIR=".ansible-checkout" +MOD_REPO="$1" + +# Hidden file to avoid the module_formatter recursing into the checkout +git clone https://github.com/ansible/ansible "$CHECKOUT_DIR" +cd "$CHECKOUT_DIR" +git submodule update --init +rm -rf "lib/ansible/modules/$MOD_REPO" +ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO" + +pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx + +. ./hacking/env-setup +PAGER=/bin/cat bin/ansible-doc -l +if [ $? -ne 0 ] ; then + exit $? +fi +make -C docsite From 0f9ade7fe3e02010dc8652126e889f3cb48a79b1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 30 Jun 2015 10:37:09 -0700 Subject: [PATCH 098/157] Fix bundler documentation --- packaging/language/bundler.py | 39 ++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/packaging/language/bundler.py b/packaging/language/bundler.py index 82ef2838a9a..5f605f5e947 100644 --- a/packaging/language/bundler.py +++ b/packaging/language/bundler.py @@ -40,18 +40,22 @@ options: default: present chdir: description: - - The directory to execute the bundler commands from. This directoy needs to contain a valid Gemfile or .bundle/ directory + - The directory to execute the bundler commands from. This directoy + needs to contain a valid Gemfile or .bundle/ directory required: false default: temporary working directory exclude_groups: description: - - A list of Gemfile groups to exclude during operations. This only applies when state is C(present). Bundler considers this a 'remembered' - property for the Gemfile and will automatically exclude groups in future operations even if C(exclude_groups) is not set + - A list of Gemfile groups to exclude during operations. This only + applies when state is C(present). 
Bundler considers this + a 'remembered' property for the Gemfile and will automatically exclude + groups in future operations even if C(exclude_groups) is not set required: false default: null clean: description: - - Only applies if state is C(present). If set removes any gems on the target host that are not in the gemfile + - Only applies if state is C(present). If set removes any gems on the + target host that are not in the gemfile required: false choices: [yes, no] default: "no" @@ -68,8 +72,9 @@ options: default: "no" deployment_mode: description: - - Only applies if state is C(present). If set it will only install gems that are in the default or production groups. Requires a Gemfile.lock - file to have been created prior + - Only applies if state is C(present). If set it will only install gems + that are in the default or production groups. Requires a Gemfile.lock + file to have been created prior required: false choices: [yes, no] default: "no" @@ -81,19 +86,25 @@ options: default: "yes" gem_path: description: - - Only applies if state is C(present). Specifies the directory to install the gems into. If C(chdir) is set then this path is relative to C(chdir) - required: false - default: RubyGems gem paths + - Only applies if state is C(present). Specifies the directory to + install the gems into. If C(chdir) is set then this path is relative to + C(chdir) + required: false + default: RubyGems gem paths binstub_directory: description: - - Only applies if state is C(present). Specifies the directory to install any gem bins files to. When executed the bin files will run within - the context of the Gemfile and fail if any required gem dependencies are not installed. If C(chdir) is set then this path is relative to C(chdir) + - Only applies if state is C(present). Specifies the directory to + install any gem bins files to. When executed the bin files will run + within the context of the Gemfile and fail if any required gem + dependencies are not installed. If C(chdir) is set then this path is + relative to C(chdir) required: false default: null extra_args: description: - - A space separated string of additional commands that can be applied to the Bundler command. Refer to the Bundler documentation for more - information + - A space separated string of additional commands that can be applied to + the Bundler command. 
Refer to the Bundler documentation for more + information required: false default: null author: Tim Hoiberg @@ -196,4 +207,4 @@ def main(): from ansible.module_utils.basic import * -main() \ No newline at end of file +main() From 3be267b57908d013533e8cfbbe3bc78a67da2b0f Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 13:45:12 -0500 Subject: [PATCH 099/157] Give dpkg_selections a .py file extension --- packaging/{dpkg_selections => dpkg_selections.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename packaging/{dpkg_selections => dpkg_selections.py} (100%) diff --git a/packaging/dpkg_selections b/packaging/dpkg_selections.py similarity index 100% rename from packaging/dpkg_selections rename to packaging/dpkg_selections.py From 8ba11e97e24396a32e8d8a3276f6c6e7960f2371 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 13:45:24 -0500 Subject: [PATCH 100/157] Add missing __init__.py file --- clustering/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 clustering/__init__.py diff --git a/clustering/__init__.py b/clustering/__init__.py new file mode 100644 index 00000000000..e69de29bb2d From 9f9422fcb583a81d560627a90a2a503a84942ba6 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 13:45:53 -0500 Subject: [PATCH 101/157] Update vsphere_copy.py to use new style module_utils import --- cloud/vmware/vsphere_copy.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_copy.py b/cloud/vmware/vsphere_copy.py index 44e20caebdf..4364e8b5197 100644 --- a/cloud/vmware/vsphere_copy.py +++ b/cloud/vmware/vsphere_copy.py @@ -149,6 +149,7 @@ def main(): else: module.fail_json(msg='Failed to upload', status=resp.status, reason=resp.reason, length=resp.length, version=resp.version, headers=resp.getheaders(), chunked=resp.chunked, url=url) -# this is magic, see lib/ansible/module_common.py -#<> +# Import module snippets +from ansible.module_utils.basic import * + main() From fda25aa93b9f11a144093cc8ec7c167b0ef302ff Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 13:46:14 -0500 Subject: [PATCH 102/157] Fix interpreter line in webfaction modules --- cloud/webfaction/webfaction_app.py | 2 +- cloud/webfaction/webfaction_db.py | 2 +- cloud/webfaction/webfaction_domain.py | 2 +- cloud/webfaction/webfaction_mailbox.py | 2 +- cloud/webfaction/webfaction_site.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index 3e42ec1265e..3d11d17a432 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -1,4 +1,4 @@ -#! /usr/bin/python +#!/usr/bin/python # # Create a Webfaction application using Ansible and the Webfaction API # diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index f420490711c..82eac1c1f42 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -1,4 +1,4 @@ -#! /usr/bin/python +#!/usr/bin/python # # Create a webfaction database using Ansible and the Webfaction API # diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index 0b35faf110f..c809dd6beb3 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -1,4 +1,4 @@ -#! 
/usr/bin/python +#!/usr/bin/python # # Create Webfaction domains and subdomains using Ansible and the Webfaction API # diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index 7547b6154e5..c08bd477601 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -1,4 +1,4 @@ -#! /usr/bin/python +#!/usr/bin/python # # Create webfaction mailbox using Ansible and the Webfaction API # diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py index 57eae39c0dc..bb1bfb94457 100644 --- a/cloud/webfaction/webfaction_site.py +++ b/cloud/webfaction/webfaction_site.py @@ -1,4 +1,4 @@ -#! /usr/bin/python +#!/usr/bin/python # # Create Webfaction website using Ansible and the Webfaction API # From 9a36454329da8909a675e3cc555dce2acda230df Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 13:46:45 -0500 Subject: [PATCH 103/157] replace tabs with spaces in mongodb_user.py --- database/misc/mongodb_user.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index ede8004945b..0529abdea09 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -225,10 +225,10 @@ def main(): update_password = module.params['update_password'] try: - if replica_set: - client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl) - else: - client = MongoClient(login_host, int(login_port), ssl=ssl) + if replica_set: + client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl) + else: + client = MongoClient(login_host, int(login_port), ssl=ssl) if login_user is None and login_password is None: mongocnf_creds = load_mongocnf() From 5605c4d7b5ef825744c8b4260f19f1c6172f7625 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 30 Jun 2015 11:09:55 -0700 Subject: [PATCH 104/157] Add author github ID --- packaging/language/bundler.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packaging/language/bundler.py b/packaging/language/bundler.py index 5f605f5e947..e98350a7b70 100644 --- a/packaging/language/bundler.py +++ b/packaging/language/bundler.py @@ -107,7 +107,7 @@ options: information required: false default: null -author: Tim Hoiberg +author: "Tim Hoiberg (@thoiberg)" ''' EXAMPLES=''' @@ -207,4 +207,5 @@ def main(): from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From 4e48ef9ecace3a6eb92e3e4d2ef1a3ea2b7e33ab Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Jul 2015 07:25:02 -0700 Subject: [PATCH 105/157] Disable travis docs checks for now --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index d43c6b3b3fa..057524c4def 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,4 +13,4 @@ script: - python2.4 -m compileall -fq -x 'cloud/|monitoring/zabbix.*\.py|/layman\.py|/maven_artifact\.py|clustering/consul.*\.py|notification/pushbullet\.py' . - python2.6 -m compileall -fq . - python2.7 -m compileall -fq . - - ./test-docs.sh extras + #- ./test-docs.sh extras From e724dc2beda665b9537e8dc65a0b553c3e1295b4 Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Wed, 1 Jul 2015 17:48:06 +0100 Subject: [PATCH 106/157] webfaction: Allow machine to be specified if account has more than one. 
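In API terms the change is small: Webfaction's login call accepts an optional machine name as a third argument, and the modules now pass it through only when the playbook supplies one. A hedged sketch of that call pattern (the endpoint URL and the sample machine name are assumptions for illustration; the modules construct their own ServerProxy object):

import xmlrpclib

def wf_login(login_name, login_password, machine=None):
    server = xmlrpclib.ServerProxy('https://api.webfaction.com/')  # assumed endpoint
    if machine:  # only needed when the account spans more than one machine
        return server.login(login_name, login_password, machine)
    return server.login(login_name, login_password)

# e.g. session_id, account = wf_login('user', 'secret', machine='Web100')
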
--- cloud/webfaction/webfaction_app.py | 27 ++++++++++++++++++++++----- cloud/webfaction/webfaction_db.py | 28 ++++++++++++++++++++++------ 2 files changed, 44 insertions(+), 11 deletions(-) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index 3d11d17a432..1c015a401d1 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -7,7 +7,9 @@ # # ------------------------------------------ # -# (c) Quentin Stafford-Fraser 2015 +# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from: +# * Andy Baker +# * Federico Tarantini # # This file is part of Ansible # @@ -80,6 +82,12 @@ options: description: - The webfaction password to use required: true + + machine: + description: + - The machine name to use (optional for accounts with only one machine) + required: false + ''' EXAMPLES = ''' @@ -90,6 +98,7 @@ EXAMPLES = ''' type=mod_wsgi35-python27 login_name={{webfaction_user}} login_password={{webfaction_passwd}} + machine={{webfaction_machine}} ''' import xmlrpclib @@ -108,6 +117,7 @@ def main(): port_open = dict(required=False, choices=BOOLEANS, default=False), login_name = dict(required=True), login_password = dict(required=True), + machine = dict(required=False, default=False), ), supports_check_mode=True ) @@ -115,10 +125,17 @@ def main(): app_type = module.params['type'] app_state = module.params['state'] - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) + if module.params['machine']: + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'], + module.params['machine'] + ) + else: + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) app_list = webfaction.list_apps(session_id) app_map = dict([(i['name'], i) for i in app_list]) diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index 82eac1c1f42..6c45e700e9b 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -4,7 +4,9 @@ # # ------------------------------------------ # -# (c) Quentin Stafford-Fraser and Andy Baker 2015 +# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from: +# * Andy Baker +# * Federico Tarantini # # This file is part of Ansible # @@ -68,6 +70,11 @@ options: description: - The webfaction password to use required: true + + machine: + description: + - The machine name to use (optional for accounts with only one machine) + required: false ''' EXAMPLES = ''' @@ -81,6 +88,7 @@ EXAMPLES = ''' type: mysql login_name: "{{webfaction_user}}" login_password: "{{webfaction_passwd}}" + machine: "{{webfaction_machine}}" # Note that, for symmetry's sake, deleting a database using # 'state: absent' will also delete the matching user. 
@@ -103,6 +111,7 @@ def main(): password = dict(required=False, default=None), login_name = dict(required=True), login_password = dict(required=True), + machine = dict(required=False, default=False), ), supports_check_mode=True ) @@ -111,10 +120,17 @@ def main(): db_type = module.params['type'] db_passwd = module.params['password'] - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) + if module.params['machine']: + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'], + module.params['machine'] + ) + else: + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) db_list = webfaction.list_dbs(session_id) db_map = dict([(i['name'], i) for i in db_list]) @@ -130,7 +146,7 @@ def main(): if db_state == 'present': - # Does an database with this name already exist? + # Does a database with this name already exist? if existing_db: # Yes, but of a different type - fail if existing_db['db_type'] != db_type: From 9398d0509fd30cd775aa32ef5a50fe6bc810170c Mon Sep 17 00:00:00 2001 From: William Brown Date: Thu, 2 Jul 2015 07:56:56 +0930 Subject: [PATCH 107/157] Changes to allow FS resize in filesystem --- system/filesystem.py | 85 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 74 insertions(+), 11 deletions(-) diff --git a/system/filesystem.py b/system/filesystem.py index a2f979ecd0b..0a98a8e2fdc 100644 --- a/system/filesystem.py +++ b/system/filesystem.py @@ -41,6 +41,12 @@ options: description: - If yes, allows to create new filesystem on devices that already has filesystem. required: false + resizefs: + choices: [ "yes", "no" ] + default: "no" + description: + - If yes, if the block device and filessytem size differ, grow the filesystem into the space. Note, XFS Will only grow if mounted. + required: false opts: description: - List of options to be passed to mkfs command. @@ -63,17 +69,68 @@ def main(): dev=dict(required=True, aliases=['device']), opts=dict(), force=dict(type='bool', default='no'), + resizefs=dict(type='bool', default='no'), ), supports_check_mode=True, ) + # There is no "single command" to manipulate filesystems, so we map them all out and their options + fs_cmd_map = { + 'ext2' : { + 'mkfs' : 'mkfs.ext2', + 'grow' : 'resize2fs', + 'grow_flag' : None, + 'force_flag' : '-F', + }, + 'ext3' : { + 'mkfs' : 'mkfs.ext3', + 'grow' : 'resize2fs', + 'grow_flag' : None, + 'force_flag' : '-F', + }, + 'ext4' : { + 'mkfs' : 'mkfs.ext4', + 'grow' : 'resize2fs', + 'grow_flag' : None, + 'force_flag' : '-F', + }, + 'ext4dev' : { + 'mkfs' : 'mkfs.ext4', + 'grow' : 'resize2fs', + 'grow_flag' : None, + 'force_flag' : '-F', + }, + 'xfs' : { + 'mkfs' : 'mkfs.xfs', + 'grow' : 'xfs_growfs', + 'grow_flag' : None, + 'force_flag' : '-f', + }, + 'btrfs' : { + 'mkfs' : 'mkfs.btrfs', + 'grow' : 'btrfs', + 'grow_flag' : 'filesystem resize', + 'force_flag' : '-f', + } + } + dev = module.params['dev'] fstype = module.params['fstype'] opts = module.params['opts'] force = module.boolean(module.params['force']) + resizefs = module.boolean(module.params['resizefs']) changed = False + try: + _ = fs_cmd_map[fstype] + except KeyError: + module.exit_json(changed=False, msg="WARNING: module does not support this filesystem yet. 
%s" % fstype) + + mkfscmd = fs_cmd_map[fstype]['mkfs'] + force_flag = fs_cmd_map[fstype]['force_flag'] + growcmd = fs_cmd_map[fstype]['grow'] + if not os.path.exists(dev): module.fail_json(msg="Device %s not found."%dev) @@ -82,9 +139,21 @@ def main(): rc,raw_fs,err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev)) fs = raw_fs.strip() - - if fs == fstype: + if fs == fstype and resizefs == False: module.exit_json(changed=False) + elif fs == fstype and resizefs == True: + cmd = module.get_bin_path(growcmd, required=True) + if module.check_mode: + module.exit_json(changed=True, msg="May resize filesystem") + else: + rc,out,err = module.run_command("%s %s" % (cmd, dev)) + # Sadly there is no easy way to determine if this has changed. For now, just say "true" and move on. + # in the future, you would have to parse the output to determine this. + # thankfully, these are safe operations if no change is made. + if rc == 0: + module.exit_json(changed=True, msg=out) + else: + module.fail_json(msg="Resizing filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err) elif fs and not force: module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite"%(dev,fs), rc=rc, err=err) @@ -93,19 +162,13 @@ def main(): if module.check_mode: changed = True else: - mkfs = module.get_bin_path('mkfs', required=True) + mkfs = module.get_bin_path(mkfscmd, required=True) cmd = None - if fstype in ['ext2', 'ext3', 'ext4', 'ext4dev']: - force_flag="-F" - elif fstype in ['xfs', 'btrfs']: - force_flag="-f" - else: - force_flag="" if opts is None: - cmd = "%s -t %s %s '%s'" % (mkfs, fstype, force_flag, dev) + cmd = "%s %s '%s'" % (mkfs, force_flag, dev) else: - cmd = "%s -t %s %s %s '%s'" % (mkfs, fstype, force_flag, opts, dev) + cmd = "%s %s %s '%s'" % (mkfs, force_flag, opts, dev) rc,_,err = module.run_command(cmd) if rc == 0: changed = True From bbb578ac594e313a45d481b7cacec33cbfec4513 Mon Sep 17 00:00:00 2001 From: Igor Khomyakov Date: Thu, 2 Jul 2015 14:17:56 +0300 Subject: [PATCH 108/157] fix user notification for v2 api `notify` parameter is not working as expected for hipchat API v2. --- notification/hipchat.py | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/notification/hipchat.py b/notification/hipchat.py index 32689965cf9..57e97eaefec 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -5,7 +5,7 @@ DOCUMENTATION = ''' --- module: hipchat version_added: "1.2" -short_description: Send a message to hipchat +short_description: Send a message to hipchat. description: - Send a message to hipchat options: @@ -56,7 +56,7 @@ options: version_added: 1.5.1 api: description: - - API url if using a self-hosted hipchat server + - API url if using a self-hosted hipchat server. 
For hipchat api version 2 use C(/v2) path in URI required: false default: 'https://api.hipchat.com/v1' version_added: 1.6.0 @@ -67,7 +67,15 @@ author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)" ''' EXAMPLES = ''' -- hipchat: token=AAAAAA room=notify msg="Ansible task finished" +- hipchat: room=notify msg="Ansible task finished" + +# Use Hipchat API version 2 + +- hipchat: + api: "https://api.hipchat.com/v2/" + token: OAUTH2_TOKEN + room: notify + msg: "Ansible task finished" ''' # =========================================== @@ -80,7 +88,6 @@ DEFAULT_URI = "https://api.hipchat.com/v1" MSG_URI_V1 = "/rooms/message" -MSG_URI_V2 = "/room/{id_or_name}/message" NOTIFY_URI_V2 = "/room/{id_or_name}/notification" def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', @@ -95,12 +102,8 @@ def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', params['message_format'] = msg_format params['color'] = color params['api'] = api - - if notify: - params['notify'] = 1 - else: - params['notify'] = 0 - + params['notify'] = int(notify) + url = api + MSG_URI_V1 + "?auth_token=%s" % (token) data = urllib.urlencode(params) @@ -116,7 +119,7 @@ def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=MSG_URI_V2): + color='yellow', notify=False, api=NOTIFY_URI_V2): '''sending message to hipchat v2 server''' print "Sending message to v2 server" @@ -126,13 +129,11 @@ def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', body['message'] = msg body['color'] = color body['message_format'] = msg_format + params['notify'] = notify - if notify: - POST_URL = api + NOTIFY_URI_V2 - else: - POST_URL = api + MSG_URI_V2 - - url = POST_URL.replace('{id_or_name}',room) + POST_URL = api + NOTIFY_URI_V2 + + url = POST_URL.replace('{id_or_name}', room) data = json.dumps(body) if module.check_mode: From a706689a353f3c6906483f5fd105b2a93b5e8b4e Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 2 Jul 2015 08:51:13 -0400 Subject: [PATCH 109/157] Bump version_added. --- cloud/rackspace/rax_clb_ssl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py index f16118c20f4..b467880400e 100644 --- a/cloud/rackspace/rax_clb_ssl.py +++ b/cloud/rackspace/rax_clb_ssl.py @@ -21,7 +21,7 @@ module: rax_clb_ssl short_description: Manage SSL termination for a Rackspace Cloud Load Balancer. description: - Set up, reconfigure, or remove SSL termination for an existing load balancer. -version_added: "1.9" +version_added: "2.0" options: loadbalancer: description: From d1a63d39a27f8b2e3f999b8013cb0d52093a0900 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 2 Jul 2015 08:53:19 -0400 Subject: [PATCH 110/157] Include the balancer acted upon in the result. 
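
The module result now carries the load balancer that was acted upon, so later tasks can reuse it without a separate lookup. A rough usage sketch (the balancer name, file paths and registered variable are illustrative, not taken from this change):

    - name: Enable SSL termination and capture the balancer
      rax_clb_ssl:
        loadbalancer: my_lb
        state: present
        private_key: "{{ lookup('file', 'credentials/server.key') }}"
        certificate: "{{ lookup('file', 'credentials/server.crt') }}"
        secure_port: 443
      register: clb_ssl

    - debug: var=clb_ssl.balancer
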
--- cloud/rackspace/rax_clb_ssl.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py index b467880400e..bfd5f643020 100644 --- a/cloud/rackspace/rax_clb_ssl.py +++ b/cloud/rackspace/rax_clb_ssl.py @@ -193,7 +193,8 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, result = dict( changed=changed, https_redirect=balancer.httpsRedirect, - ssl_termination=new_ssl + ssl_termination=new_ssl, + balancer=balancer ) success = True From 9462ad55e3c99163b673850711981ba1737273ed Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 2 Jul 2015 08:59:54 -0400 Subject: [PATCH 111/157] Guard calls that modify the CLB with try/except. --- cloud/rackspace/rax_clb_ssl.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py index bfd5f643020..eafa725d286 100644 --- a/cloud/rackspace/rax_clb_ssl.py +++ b/cloud/rackspace/rax_clb_ssl.py @@ -154,12 +154,18 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, needs_change = True if needs_change: - balancer.add_ssl_termination(**ssl_attrs) + try: + balancer.add_ssl_termination(**ssl_attrs) + except pyrax.exceptions.PyraxException, e: + module.fail_json(msg='%s' % e.message) changed = True elif state == 'absent': # Remove SSL termination if it's already configured. if existing_ssl: - balancer.delete_ssl_termination() + try: + balancer.delete_ssl_termination() + except pyrax.exceptions.PyraxException, e: + module.fail_json(msg='%s' % e.message) changed = True if https_redirect is not None and balancer.httpsRedirect != https_redirect: @@ -168,7 +174,10 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, # while the SSL termination changes above are being applied. pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - balancer.update(httpsRedirect=https_redirect) + try: + balancer.update(httpsRedirect=https_redirect) + except pyrax.exceptions.PyraxException, e: + module.fail_json(msg='%s' % e.message) changed = True if changed and wait: From bd4023fe8f177c457bf40fa9a3ae27af0d012c12 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 2 Jul 2015 09:09:28 -0400 Subject: [PATCH 112/157] Bring the examples up to date. --- cloud/rackspace/rax_clb_ssl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py index eafa725d286..20ae9698457 100644 --- a/cloud/rackspace/rax_clb_ssl.py +++ b/cloud/rackspace/rax_clb_ssl.py @@ -78,7 +78,7 @@ extends_documentation_fragment: rackspace EXAMPLES = ''' - name: Enable SSL termination on a load balancer rax_clb_ssl: - balancer_name: the_loadbalancer + loadbalancer: the_loadbalancer state: present private_key: "{{ lookup('file', 'credentials/server.key' ) }}" certificate: "{{ lookup('file', 'credentials/server.crt' ) }}" @@ -88,7 +88,7 @@ EXAMPLES = ''' - name: Disable SSL termination rax_clb_ssl: - balancer_name: "{{ registered_lb.balancer.id }}" + loadbalancer: "{{ registered_lb.balancer.id }}" state: absent wait: true ''' From 84880c5e35a6dc8e2eeddda3a1377d617ee57368 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 2 Jul 2015 09:24:07 -0400 Subject: [PATCH 113/157] Use rax_to_dict(). 
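
Converting the balancer through the shared rax_to_dict helper returns a plain dictionary instead of a raw pyrax object, which keeps the registered result serialisable and easy to template. A minimal sketch, assuming a prior task registered the module output as clb_ssl (the "name" field is an assumption about the dict contents, not guaranteed by this change):

    - debug:
        msg: "SSL termination updated on {{ clb_ssl.balancer.name }}"
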
--- cloud/rackspace/rax_clb_ssl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py index 20ae9698457..2013b8c4d81 100644 --- a/cloud/rackspace/rax_clb_ssl.py +++ b/cloud/rackspace/rax_clb_ssl.py @@ -203,7 +203,7 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, changed=changed, https_redirect=balancer.httpsRedirect, ssl_termination=new_ssl, - balancer=balancer + balancer=rax_to_dict(balancer, 'clb') ) success = True From d8023c225d83ba33454d9b6958d92151216a293f Mon Sep 17 00:00:00 2001 From: John W Higgins Date: Thu, 2 Jul 2015 17:07:07 -0700 Subject: [PATCH 114/157] Add zfs cloning --- system/zfs.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/system/zfs.py b/system/zfs.py index c3c87634377..00a81e32c54 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -66,6 +66,10 @@ options: - The checksum property. required: False choices: ['on','off',fletcher2,fletcher4,sha256] + clone: + description: + - Name of the snapshot to clone + required: False compression: description: - The compression property. @@ -253,8 +257,11 @@ class Zfs(object): properties = self.properties volsize = properties.pop('volsize', None) volblocksize = properties.pop('volblocksize', None) + clone = properties.pop('clone', None) if "@" in self.name: action = 'snapshot' + elif clone: + action = 'clone' else: action = 'create' @@ -272,6 +279,8 @@ class Zfs(object): if volsize: cmd.append('-V') cmd.append(volsize) + if clone: + cmd.append(clone) cmd.append(self.name) (rc, err, out) = self.module.run_command(' '.join(cmd)) if rc == 0: @@ -347,6 +356,7 @@ def main(): 'canmount': {'required': False, 'choices':['on', 'off', 'noauto']}, 'casesensitivity': {'required': False, 'choices':['sensitive', 'insensitive', 'mixed']}, 'checksum': {'required': False, 'choices':['on', 'off', 'fletcher2', 'fletcher4', 'sha256']}, + 'clone': {'required': False}, 'compression': {'required': False, 'choices':['on', 'off', 'lzjb', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lz4', 'zle']}, 'copies': {'required': False, 'choices':['1', '2', '3']}, 'createparent': {'required': False, 'choices':['on', 'off']}, From 094ef92aeee49d97e754109df13affdf4739f71f Mon Sep 17 00:00:00 2001 From: John W Higgins Date: Fri, 3 Jul 2015 16:16:18 -0700 Subject: [PATCH 115/157] Switch to origin and cleanup --- system/zfs.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/system/zfs.py b/system/zfs.py index 00a81e32c54..f8a72a44f01 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -66,10 +66,6 @@ options: - The checksum property. required: False choices: ['on','off',fletcher2,fletcher4,sha256] - clone: - description: - - Name of the snapshot to clone - required: False compression: description: - The compression property. @@ -119,6 +115,10 @@ options: - The normalization property. required: False choices: [none,formC,formD,formKC,formKD] + origin: + description: + - Name of the snapshot to clone + required: False primarycache: description: - The primarycache property. 
@@ -225,6 +225,12 @@ EXAMPLES = ''' # Create a new file system called myfs2 with snapdir enabled - zfs: name=rpool/myfs2 state=present snapdir=enabled + +# Create a new file system by cloning a snapshot +- zfs: name=rpool/cloned_fs state=present origin=rpool/myfs@mysnapshot + +# Destroy a filesystem +- zfs: name=rpool/myfs state=absent ''' @@ -257,10 +263,10 @@ class Zfs(object): properties = self.properties volsize = properties.pop('volsize', None) volblocksize = properties.pop('volblocksize', None) - clone = properties.pop('clone', None) + origin = properties.pop('origin', None) if "@" in self.name: action = 'snapshot' - elif clone: + elif origin: action = 'clone' else: action = 'create' @@ -279,8 +285,8 @@ class Zfs(object): if volsize: cmd.append('-V') cmd.append(volsize) - if clone: - cmd.append(clone) + if origin: + cmd.append(origin) cmd.append(self.name) (rc, err, out) = self.module.run_command(' '.join(cmd)) if rc == 0: @@ -356,7 +362,6 @@ def main(): 'canmount': {'required': False, 'choices':['on', 'off', 'noauto']}, 'casesensitivity': {'required': False, 'choices':['sensitive', 'insensitive', 'mixed']}, 'checksum': {'required': False, 'choices':['on', 'off', 'fletcher2', 'fletcher4', 'sha256']}, - 'clone': {'required': False}, 'compression': {'required': False, 'choices':['on', 'off', 'lzjb', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lz4', 'zle']}, 'copies': {'required': False, 'choices':['1', '2', '3']}, 'createparent': {'required': False, 'choices':['on', 'off']}, @@ -370,6 +375,7 @@ def main(): 'mountpoint': {'required': False}, 'nbmand': {'required': False, 'choices':['on', 'off']}, 'normalization': {'required': False, 'choices':['none', 'formC', 'formD', 'formKC', 'formKD']}, + 'origin': {'required': False}, 'primarycache': {'required': False, 'choices':['all', 'none', 'metadata']}, 'quota': {'required': False}, 'readonly': {'required': False, 'choices':['on', 'off']}, From 217221caed15d7a8cf714873253a885d5a64b6c3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 4 Jul 2015 12:18:45 -0400 Subject: [PATCH 116/157] added version_added to new origin option --- system/zfs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/system/zfs.py b/system/zfs.py index f8a72a44f01..51b9db63692 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -119,6 +119,7 @@ options: description: - Name of the snapshot to clone required: False + version_added: "2.0" primarycache: description: - The primarycache property. 
From 311d73620b5788e43f43b7c0672c7b10254f3e4b Mon Sep 17 00:00:00 2001 From: Phil Date: Mon, 6 Jul 2015 09:59:51 -0500 Subject: [PATCH 117/157] use convertto-bool for rm and recurse params --- windows/win_unzip.ps1 | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1 index e4509a290a2..a62f246f5c8 100644 --- a/windows/win_unzip.ps1 +++ b/windows/win_unzip.ps1 @@ -62,19 +62,18 @@ Else { Fail-Json $result "missing required argument: dest" } -If ($params.recurse -eq "true" -Or $params.recurse -eq "yes") { - $recurse = $true +If ($params.recurse) { + $recurse = ConvertTo-Bool ($params.recurse) } Else { $recurse = $false } -If ($params.rm -eq "true" -Or $params.rm -eq "yes"){ - $rm = $true - Set-Attr $result.win_unzip "rm" "true" -} -Else { - $rm = $false +If ($params.rm) { + $rm = ConvertTo-Bool ($params.rm) +} +Else { + $rm = $false } If ($ext -eq ".zip" -And $recurse -eq $false) { @@ -111,7 +110,7 @@ Else { If ($recurse) { Expand-Archive -Path $src -OutputPath $dest -Force - If ($rm) { + If ($rm -eq $true) { Get-ChildItem $dest -recurse | Where {$_.extension -eq ".gz" -Or $_.extension -eq ".zip" -Or $_.extension -eq ".bz2" -Or $_.extension -eq ".tar" -Or $_.extension -eq ".msu"} | % { Expand-Archive $_.FullName -OutputPath $dest -Force Remove-Item $_.FullName -Force From 5a2a22bf68b6cd305e6dbfe1a0044eaf5d9398ed Mon Sep 17 00:00:00 2001 From: Adam Keech Date: Thu, 25 Jun 2015 14:18:57 -0400 Subject: [PATCH 118/157] Adding win_regedit module --- windows/win_regedit.ps1 | 173 ++++++++++++++++++++++++++++++++++++++++ windows/win_regedit.py | 101 +++++++++++++++++++++++ 2 files changed, 274 insertions(+) create mode 100644 windows/win_regedit.ps1 create mode 100644 windows/win_regedit.py diff --git a/windows/win_regedit.ps1 b/windows/win_regedit.ps1 new file mode 100644 index 00000000000..18cdc99c6ae --- /dev/null +++ b/windows/win_regedit.ps1 @@ -0,0 +1,173 @@ +#!powershell +# This file is part of Ansible +# +# (c) 2015, Adam Keech , Josh Ludwig +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +$ErrorActionPreference = "Stop" + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; +$result = New-Object PSObject; +Set-Attr $result "changed" $false; + +If ($params.name) +{ + $registryKeyName = $params.name +} +Else +{ + Fail-Json $result "missing required argument: name" +} + +If ($params.state) +{ + $state = $params.state.ToString().ToLower() + If (($state -ne "present") -and ($state -ne "absent")) + { + Fail-Json $result "state is $state; must be present or absent" + } +} +Else +{ + $state = "present" +} + +If ($params.value) +{ + $registryKeyValue = $params.value +} +ElseIf ($state -eq "present") +{ + Fail-Json $result "missing required argument: value" +} + +If ($params.valuetype) +{ + $registryValueType = $params.valuetype.ToString().ToLower() + $validRegistryValueTypes = "binary", "dword", "expandstring", "multistring", "string", "qword" + If ($validRegistryValueTypes -notcontains $registryValueType) + { + Fail-Json $result "valuetype is $registryValueType; must be binary, dword, expandstring, multistring, string, or qword" + } +} +Else +{ + $registryValueType = "string" +} + +If ($params.path) +{ + $registryKeyPath = $params.path +} +Else +{ + Fail-Json $result "missing required argument: path" +} + +Function Test-RegistryValue { + Param ( + [parameter(Mandatory=$true)] + [ValidateNotNullOrEmpty()]$Path, + [parameter(Mandatory=$true)] + [ValidateNotNullOrEmpty()]$Value + ) + Try { + Get-ItemProperty -Path $Path -Name $Value + Return $true + } + Catch { + Return $false + } +} + +if($state -eq "present") { + if (Test-Path $registryKeyPath) { + if (Test-RegistryValue -Path $registryKeyPath -Value $registryKeyName) + { + # Changes Type and Value + If ((Get-Item $registryKeyPath).GetValueKind($registryKeyName) -ne $registryValueType) + { + Try + { + Remove-ItemProperty -Path $registryKeyPath -Name $registryKeyName + New-ItemProperty -Path $registryKeyPath -Name $registryKeyName -Value $registryKeyValue -PropertyType $registryValueType + $result.changed = $true + } + Catch + { + Fail-Json $result $_.Exception.Message + } + } + # Only Changes Value + ElseIf ((Get-ItemProperty -Path $registryKeyPath | Select-Object -ExpandProperty $registryKeyName) -ne $registryKeyValue) + { + Try { + Set-ItemProperty -Path $registryKeyPath -Name $registryKeyName -Value $registryKeyValue + $result.changed = $true + } + Catch + { + Fail-Json $result $_.Exception.Message + } + } + } + else + { + Try + { + New-ItemProperty -Path $registryKeyPath -Name $registryKeyName -Value $registryKeyValue -PropertyType $registryValueType + $result.changed = $true + } + Catch + { + Fail-Json $result $_.Exception.Message + } + } + } + else + { + Try + { + New-Item $registryKeyPath -Force | New-ItemProperty -Name $registryKeyName -Value $registryKeyValue -Force -PropertyType $registryValueType + $result.changed = $true + } + Catch + { + Fail-Json $result $_.Exception.Message + } + } +} +else +{ + if (Test-Path $registryKeyPath) + { + if (Test-RegistryValue -Path $registryKeyPath -Value $registryKeyName) { + Try + { + Remove-ItemProperty -Path $registryKeyPath -Name $registryKeyName + $result.changed = $true + } + Catch + { + Fail-Json $result $_.Exception.Message + } + } + } +} + +Exit-Json $result diff --git a/windows/win_regedit.py b/windows/win_regedit.py new file mode 100644 index 00000000000..007ddd4e8a9 --- /dev/null +++ b/windows/win_regedit.py @@ -0,0 +1,101 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Adam Keech , Josh Ludwig +# +# This file is part of Ansible +# +# Ansible is 
free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +DOCUMENTATION = ''' +--- +module: win_regedit +version_added: "2.0" +short_description: Add, Edit, or Remove Registry Key +description: + - Add, Edit, or Remove Registry Key using ItemProperties Cmdlets +options: + name: + description: + - Name of Registry Key + required: true + default: null + aliases: [] + value: + description: + - Value of Registry Key + required: false + default: null + aliases: [] + valuetype: + description: + - Type of Registry Key + required: false + choices: + - binary + - dword + - expandstring + - multistring + - string + - qword + default: string + aliases: [] + path: + description: + - Path of Registry Key + required: true + default: null + aliases: [] + state: + description: + - State of Registry Key + required: false + choices: + - present + - absent + default: present + aliases: [] +author: "Adam Keech (@smadam813), Josh Ludwig (@joshludwig)" +''' + +EXAMPLES = ''' + # Add Registry Key (Default is String) + win_regedit: + name: testkey + value: 1337 + path: HKCU:\Software\MyCompany + + # Add Registry Key with Type DWord + win_regedit: + name: testkey + value: 1337 + valuetype: dword + path: HKCU:\Software\MyCompany + + # Edit Registry Key called testkey + win_regedit: + name: testkey + value: 8008 + path: HKCU:\Software\MyCompany + + # Remove Registry Key called testkey + win_regedit: + name: testkey + path: HKCU:\Software\MyCompany + state: absent +''' + From e84666fd7428012e29e013e792aa5574e6892c75 Mon Sep 17 00:00:00 2001 From: Adam Keech Date: Wed, 1 Jul 2015 09:53:35 -0400 Subject: [PATCH 119/157] Renaming variables in win_regedit module to make more sense with actions that are happening. 
--- windows/win_regedit.ps1 | 49 +++++++++++++++++++++-------------------- windows/win_regedit.py | 43 ++++++++++++++++++------------------ 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/windows/win_regedit.ps1 b/windows/win_regedit.ps1 index 18cdc99c6ae..3e654202296 100644 --- a/windows/win_regedit.ps1 +++ b/windows/win_regedit.ps1 @@ -27,7 +27,7 @@ Set-Attr $result "changed" $false; If ($params.name) { - $registryKeyName = $params.name + $registryValueName = $params.name } Else { @@ -47,39 +47,39 @@ Else $state = "present" } -If ($params.value) +If ($params.data) { - $registryKeyValue = $params.value + $registryValueData = $params.data } ElseIf ($state -eq "present") { - Fail-Json $result "missing required argument: value" + Fail-Json $result "missing required argument: data" } -If ($params.valuetype) +If ($params.type) { - $registryValueType = $params.valuetype.ToString().ToLower() - $validRegistryValueTypes = "binary", "dword", "expandstring", "multistring", "string", "qword" - If ($validRegistryValueTypes -notcontains $registryValueType) + $registryDataType = $params.type.ToString().ToLower() + $validRegistryDataTypes = "binary", "dword", "expandstring", "multistring", "string", "qword" + If ($validRegistryDataTypes -notcontains $registryDataType) { - Fail-Json $result "valuetype is $registryValueType; must be binary, dword, expandstring, multistring, string, or qword" + Fail-Json $result "type is $registryDataType; must be binary, dword, expandstring, multistring, string, or qword" } } Else { - $registryValueType = "string" + $registryDataType = "string" } If ($params.path) { - $registryKeyPath = $params.path + $registryValuePath = $params.path } Else { Fail-Json $result "missing required argument: path" } -Function Test-RegistryValue { +Function Test-RegistryValueData { Param ( [parameter(Mandatory=$true)] [ValidateNotNullOrEmpty()]$Path, @@ -96,16 +96,16 @@ Function Test-RegistryValue { } if($state -eq "present") { - if (Test-Path $registryKeyPath) { - if (Test-RegistryValue -Path $registryKeyPath -Value $registryKeyName) + if (Test-Path $registryValuePath) { + if (Test-RegistryValueData -Path $registryValuePath -Value $registryValueName) { # Changes Type and Value - If ((Get-Item $registryKeyPath).GetValueKind($registryKeyName) -ne $registryValueType) + If ((Get-Item $registryValuePath).GetValueKind($registryValueName) -ne $registryDataType) { Try { - Remove-ItemProperty -Path $registryKeyPath -Name $registryKeyName - New-ItemProperty -Path $registryKeyPath -Name $registryKeyName -Value $registryKeyValue -PropertyType $registryValueType + Remove-ItemProperty -Path $registryValuePath -Name $registryValueName + New-ItemProperty -Path $registryValuePath -Name $registryValueName -Value $registryValueData -PropertyType $registryDataType $result.changed = $true } Catch @@ -114,10 +114,10 @@ if($state -eq "present") { } } # Only Changes Value - ElseIf ((Get-ItemProperty -Path $registryKeyPath | Select-Object -ExpandProperty $registryKeyName) -ne $registryKeyValue) + ElseIf ((Get-ItemProperty -Path $registryValuePath | Select-Object -ExpandProperty $registryValueName) -ne $registryValueData) { Try { - Set-ItemProperty -Path $registryKeyPath -Name $registryKeyName -Value $registryKeyValue + Set-ItemProperty -Path $registryValuePath -Name $registryValueName -Value $registryValueData $result.changed = $true } Catch @@ -130,7 +130,7 @@ if($state -eq "present") { { Try { - New-ItemProperty -Path $registryKeyPath -Name $registryKeyName -Value $registryKeyValue -PropertyType 
$registryValueType + New-ItemProperty -Path $registryValuePath -Name $registryValueName -Value $registryValueData -PropertyType $registryDataType $result.changed = $true } Catch @@ -143,7 +143,7 @@ if($state -eq "present") { { Try { - New-Item $registryKeyPath -Force | New-ItemProperty -Name $registryKeyName -Value $registryKeyValue -Force -PropertyType $registryValueType + New-Item $registryValuePath -Force | New-ItemProperty -Name $registryValueName -Value $registryValueData -Force -PropertyType $registryDataType $result.changed = $true } Catch @@ -154,12 +154,12 @@ if($state -eq "present") { } else { - if (Test-Path $registryKeyPath) + if (Test-Path $registryValuePath) { - if (Test-RegistryValue -Path $registryKeyPath -Value $registryKeyName) { + if (Test-RegistryValueData -Path $registryValuePath -Value $registryValueName) { Try { - Remove-ItemProperty -Path $registryKeyPath -Name $registryKeyName + Remove-ItemProperty -Path $registryValuePath -Name $registryValueName $result.changed = $true } Catch @@ -171,3 +171,4 @@ else } Exit-Json $result + diff --git a/windows/win_regedit.py b/windows/win_regedit.py index 007ddd4e8a9..d8fd3a7c25e 100644 --- a/windows/win_regedit.py +++ b/windows/win_regedit.py @@ -25,25 +25,25 @@ DOCUMENTATION = ''' --- module: win_regedit version_added: "2.0" -short_description: Add, Edit, or Remove Registry Key +short_description: Add, Edit, or Remove Registry Value description: - - Add, Edit, or Remove Registry Key using ItemProperties Cmdlets + - Add, Edit, or Remove Registry Value using ItemProperties Cmdlets options: name: description: - - Name of Registry Key + - Name of Registry Value required: true default: null aliases: [] - value: + data: description: - - Value of Registry Key + - Registry Value Data required: false default: null aliases: [] - valuetype: + type: description: - - Type of Registry Key + - Registry Value Data Type required: false choices: - binary @@ -56,13 +56,13 @@ options: aliases: [] path: description: - - Path of Registry Key + - Path of Registry Value required: true default: null aliases: [] state: description: - - State of Registry Key + - State of Registry Value required: false choices: - present @@ -73,29 +73,28 @@ author: "Adam Keech (@smadam813), Josh Ludwig (@joshludwig)" ''' EXAMPLES = ''' - # Add Registry Key (Default is String) + # Add Registry Value (Default is String) win_regedit: - name: testkey - value: 1337 + name: testvalue + data: 1337 path: HKCU:\Software\MyCompany - # Add Registry Key with Type DWord + # Add Registry Value with Type DWord win_regedit: - name: testkey - value: 1337 - valuetype: dword + name: testvalue + data: 1337 + type: dword path: HKCU:\Software\MyCompany - # Edit Registry Key called testkey + # Edit Registry Value called testvalue win_regedit: - name: testkey - value: 8008 + name: testvalue + data: 8008 path: HKCU:\Software\MyCompany - # Remove Registry Key called testkey + # Remove Registry Value called testvalue win_regedit: - name: testkey + name: testvalue path: HKCU:\Software\MyCompany state: absent ''' - From 389e59b9708d005c73ed84b5c1703a4c9a3d931a Mon Sep 17 00:00:00 2001 From: Adam Keech Date: Mon, 6 Jul 2015 15:25:01 -0400 Subject: [PATCH 120/157] Adding functionality to not only edit Values, but also Keys. 
--- windows/win_regedit.ps1 | 80 ++++++++++++++++++++++++----------------- windows/win_regedit.py | 61 +++++++++++++++++-------------- 2 files changed, 83 insertions(+), 58 deletions(-) diff --git a/windows/win_regedit.ps1 b/windows/win_regedit.ps1 index 3e654202296..1a257413466 100644 --- a/windows/win_regedit.ps1 +++ b/windows/win_regedit.ps1 @@ -25,13 +25,22 @@ $params = Parse-Args $args; $result = New-Object PSObject; Set-Attr $result "changed" $false; -If ($params.name) +If ($params.key) { - $registryValueName = $params.name + $registryKey = $params.key } Else { - Fail-Json $result "missing required argument: name" + Fail-Json $result "missing required argument: key" +} + +If ($params.value) +{ + $registryValue = $params.value +} +Else +{ + $registryValue = $null } If ($params.state) @@ -49,16 +58,16 @@ Else If ($params.data) { - $registryValueData = $params.data + $registryData = $params.data } -ElseIf ($state -eq "present") +ElseIf ($state -eq "present" -and $registryValue -ne $null) { Fail-Json $result "missing required argument: data" } -If ($params.type) +If ($params.datatype) { - $registryDataType = $params.type.ToString().ToLower() + $registryDataType = $params.datatype.ToString().ToLower() $validRegistryDataTypes = "binary", "dword", "expandstring", "multistring", "string", "qword" If ($validRegistryDataTypes -notcontains $registryDataType) { @@ -70,15 +79,6 @@ Else $registryDataType = "string" } -If ($params.path) -{ - $registryValuePath = $params.path -} -Else -{ - Fail-Json $result "missing required argument: path" -} - Function Test-RegistryValueData { Param ( [parameter(Mandatory=$true)] @@ -96,16 +96,17 @@ Function Test-RegistryValueData { } if($state -eq "present") { - if (Test-Path $registryValuePath) { - if (Test-RegistryValueData -Path $registryValuePath -Value $registryValueName) + if ((Test-Path $registryKey) -and $registryValue -ne $null) + { + if (Test-RegistryValueData -Path $registryKey -Value $registryValue) { - # Changes Type and Value - If ((Get-Item $registryValuePath).GetValueKind($registryValueName) -ne $registryDataType) + # Changes Data and DataType + if ((Get-Item $registryKey).GetValueKind($registryValue) -ne $registryDataType) { Try { - Remove-ItemProperty -Path $registryValuePath -Name $registryValueName - New-ItemProperty -Path $registryValuePath -Name $registryValueName -Value $registryValueData -PropertyType $registryDataType + Remove-ItemProperty -Path $registryKey -Name $registryValue + New-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData -PropertyType $registryDataType $result.changed = $true } Catch @@ -113,11 +114,11 @@ if($state -eq "present") { Fail-Json $result $_.Exception.Message } } - # Only Changes Value - ElseIf ((Get-ItemProperty -Path $registryValuePath | Select-Object -ExpandProperty $registryValueName) -ne $registryValueData) + # Changes Only Data + elseif ((Get-ItemProperty -Path $registryKey | Select-Object -ExpandProperty $registryValue) -ne $registryData) { Try { - Set-ItemProperty -Path $registryValuePath -Name $registryValueName -Value $registryValueData + Set-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData $result.changed = $true } Catch @@ -130,7 +131,7 @@ if($state -eq "present") { { Try { - New-ItemProperty -Path $registryValuePath -Name $registryValueName -Value $registryValueData -PropertyType $registryDataType + New-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData -PropertyType $registryDataType $result.changed = $true } Catch @@ -139,12 
+140,17 @@ if($state -eq "present") { } } } - else + elseif(-not (Test-Path $registryKey)) { Try { - New-Item $registryValuePath -Force | New-ItemProperty -Name $registryValueName -Value $registryValueData -Force -PropertyType $registryDataType + $newRegistryKey = New-Item $registryKey -Force $result.changed = $true + + if($registryValue -ne $null) { + $newRegistryKey | New-ItemProperty -Name $registryValue -Value $registryData -Force -PropertyType $registryDataType + $result.changed = $true + } } Catch { @@ -154,12 +160,23 @@ if($state -eq "present") { } else { - if (Test-Path $registryValuePath) + if (Test-Path $registryKey) { - if (Test-RegistryValueData -Path $registryValuePath -Value $registryValueName) { + if ($registryValue -eq $null) { Try { - Remove-ItemProperty -Path $registryValuePath -Name $registryValueName + Remove-Item -Path $registryKey -Recurse + $result.changed = $true + } + Catch + { + Fail-Json $result $_.Exception.Message + } + } + elseif (Test-RegistryValueData -Path $registryKey -Value $registryValue) { + Try + { + Remove-ItemProperty -Path $registryKey -Name $registryValue $result.changed = $true } Catch @@ -171,4 +188,3 @@ else } Exit-Json $result - diff --git a/windows/win_regedit.py b/windows/win_regedit.py index d8fd3a7c25e..5087a5eaa8f 100644 --- a/windows/win_regedit.py +++ b/windows/win_regedit.py @@ -25,11 +25,17 @@ DOCUMENTATION = ''' --- module: win_regedit version_added: "2.0" -short_description: Add, Edit, or Remove Registry Value +short_description: Add, Edit, or Remove Registry Keys and Values description: - - Add, Edit, or Remove Registry Value using ItemProperties Cmdlets + - Add, Edit, or Remove Registry Keys and Values using ItemProperties Cmdlets options: - name: + key: + description: + - Name of Registry Key + required: true + default: null + aliases: [] + value: description: - Name of Registry Value required: true @@ -41,7 +47,7 @@ options: required: false default: null aliases: [] - type: + datatype: description: - Registry Value Data Type required: false @@ -54,12 +60,6 @@ options: - qword default: string aliases: [] - path: - description: - - Path of Registry Value - required: true - default: null - aliases: [] state: description: - State of Registry Value @@ -73,28 +73,37 @@ author: "Adam Keech (@smadam813), Josh Ludwig (@joshludwig)" ''' EXAMPLES = ''' - # Add Registry Value (Default is String) + # Creates Registry Key called MyCompany. win_regedit: - name: testvalue - data: 1337 - path: HKCU:\Software\MyCompany + key: HKCU:\Software\MyCompany + + # Creates Registry Key called MyCompany, + # a value within MyCompany Key called "hello", and + # data for the value "hello" containing "world". + win_regedit: + key: HKCU:\Software\MyCompany + value: hello + data: world - # Add Registry Value with Type DWord + # Creates Registry Key called MyCompany, + # a value within MyCompany Key called "hello", and + # data for the value "hello" containing "1337" as type "dword". 
win_regedit: - name: testvalue + key: HKCU:\Software\MyCompany + value: hello data: 1337 - type: dword - path: HKCU:\Software\MyCompany + datatype: dword - # Edit Registry Value called testvalue + # Delete Registry Key MyCompany + # NOTE: Not specifying a value will delete the root key which means + # all values will be deleted win_regedit: - name: testvalue - data: 8008 - path: HKCU:\Software\MyCompany - - # Remove Registry Value called testvalue + key: HKCU:\Software\MyCompany + state: absent + + # Delete Registry Value "hello" from MyCompany Key win_regedit: - name: testvalue - path: HKCU:\Software\MyCompany + key: HKCU:\Software\MyCompany + value: hello state: absent ''' From 9fa5152a94449830ba8ba0dec980fa2d347d4248 Mon Sep 17 00:00:00 2001 From: Adrian Muraru Date: Sun, 14 Jun 2015 13:34:15 +0300 Subject: [PATCH 121/157] Add option to send a private message in irc module --- notification/irc.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/notification/irc.py b/notification/irc.py index e6852c8510a..faaa7805629 100644 --- a/notification/irc.py +++ b/notification/irc.py @@ -63,6 +63,10 @@ options: description: - Channel name required: true + nick_to: + description: + - Nick to send the message to + required: false key: description: - Channel key @@ -113,7 +117,7 @@ from time import sleep def send_msg(channel, msg, server='localhost', port='6667', key=None, topic=None, - nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False): + nick="ansible", nick_to=None, color='none', passwd=False, timeout=30, use_ssl=False): '''send message to IRC''' colornumbers = { @@ -173,7 +177,10 @@ def send_msg(channel, msg, server='localhost', port='6667', key=None, topic=None irc.send('TOPIC %s :%s\r\n' % (channel, topic)) sleep(1) - irc.send('PRIVMSG %s :%s\r\n' % (channel, message)) + if nick_to: + irc.send('PRIVMSG %s :%s\r\n' % (nick_to, message)) + else: + irc.send('PRIVMSG %s :%s\r\n' % (channel, message)) sleep(1) irc.send('PART %s\r\n' % channel) irc.send('QUIT\r\n') @@ -191,6 +198,7 @@ def main(): server=dict(default='localhost'), port=dict(default=6667), nick=dict(default='ansible'), + nick_to=dict(), msg=dict(required=True), color=dict(default="none", choices=["yellow", "red", "green", "blue", "black", "none"]), @@ -208,6 +216,7 @@ def main(): port = module.params["port"] nick = module.params["nick"] topic = module.params["topic"] + nick_to = module.params["nick_to"] msg = module.params["msg"] color = module.params["color"] channel = module.params["channel"] @@ -217,7 +226,7 @@ def main(): use_ssl = module.params["use_ssl"] try: - send_msg(channel, msg, server, port, key, topic, nick, color, passwd, timeout, use_ssl) + send_msg(channel, msg, server, port, key, topic, nick, nick_to, color, passwd, timeout, use_ssl) except Exception, e: module.fail_json(msg="unable to send to IRC: %s" % e) From 2dd32236ebec688a885adaddd2354f127dfd2b4d Mon Sep 17 00:00:00 2001 From: Adrian Muraru Date: Tue, 7 Jul 2015 13:35:11 +0300 Subject: [PATCH 122/157] Implemented comments --- notification/irc.py | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/notification/irc.py b/notification/irc.py index faaa7805629..2c3c19be4dd 100644 --- a/notification/irc.py +++ b/notification/irc.py @@ -39,7 +39,7 @@ options: default: 6667 nick: description: - - Nickname. May be shortened, depending on server's NICKLEN setting. + - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting. 
required: false default: ansible msg: @@ -65,8 +65,9 @@ options: required: true nick_to: description: - - Nick to send the message to + - A list of nicknames to send the message to. When both channel and nick_to are defined, the message will be send to both of them. required: false + version_added: 2.0 key: description: - Channel key @@ -99,7 +100,16 @@ EXAMPLES = ''' - irc: server=irc.example.net channel="#t1" msg="Hello world" - local_action: irc port=6669 + server="irc.example.net" + channel="#t1" + msg="All finished at {{ ansible_date_time.iso8601 }}" + color=red + nick=ansibleIRC + +- local_action: irc port=6669 + server="irc.example.net" channel="#t1" + nick_to=["nick1", "nick2"] msg="All finished at {{ ansible_date_time.iso8601 }}" color=red nick=ansibleIRC @@ -116,8 +126,8 @@ import ssl from time import sleep -def send_msg(channel, msg, server='localhost', port='6667', key=None, topic=None, - nick="ansible", nick_to=None, color='none', passwd=False, timeout=30, use_ssl=False): +def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=[], key=None, topic=None, + nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False): '''send message to IRC''' colornumbers = { @@ -178,8 +188,9 @@ def send_msg(channel, msg, server='localhost', port='6667', key=None, topic=None sleep(1) if nick_to: - irc.send('PRIVMSG %s :%s\r\n' % (nick_to, message)) - else: + for nick in nick_to: + irc.send('PRIVMSG %s :%s\r\n' % (nick, message)) + if channel: irc.send('PRIVMSG %s :%s\r\n' % (channel, message)) sleep(1) irc.send('PART %s\r\n' % channel) @@ -198,35 +209,38 @@ def main(): server=dict(default='localhost'), port=dict(default=6667), nick=dict(default='ansible'), - nick_to=dict(), + nick_to=dict(required=False, type='list'), msg=dict(required=True), color=dict(default="none", choices=["yellow", "red", "green", "blue", "black", "none"]), - channel=dict(required=True), + channel=dict(required=False), key=dict(), topic=dict(), passwd=dict(), timeout=dict(type='int', default=30), use_ssl=dict(type='bool', default=False) ), - supports_check_mode=True + supports_check_mode=True, + required_one_of=[['channel', 'nick_to']] ) server = module.params["server"] port = module.params["port"] nick = module.params["nick"] - topic = module.params["topic"] nick_to = module.params["nick_to"] msg = module.params["msg"] color = module.params["color"] channel = module.params["channel"] + topic = module.params["topic"] + if topic and not channel: + module.fail_json(msg="When topic is specified, a channel is required.") key = module.params["key"] passwd = module.params["passwd"] timeout = module.params["timeout"] use_ssl = module.params["use_ssl"] try: - send_msg(channel, msg, server, port, key, topic, nick, nick_to, color, passwd, timeout, use_ssl) + send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl) except Exception, e: module.fail_json(msg="unable to send to IRC: %s" % e) From 8131bd3030006d52f9e33e98d556f4915d1e5f47 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 7 Jul 2015 10:49:43 -0700 Subject: [PATCH 123/157] Documentation update --- notification/irc.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/notification/irc.py b/notification/irc.py index 2c3c19be4dd..1eae8ed8284 100644 --- a/notification/irc.py +++ b/notification/irc.py @@ -61,12 +61,13 @@ options: choices: [ "none", "yellow", "red", "green", "blue", "black" ] channel: description: - - Channel name + - Channel name. 
One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them. required: true nick_to: description: - - A list of nicknames to send the message to. When both channel and nick_to are defined, the message will be send to both of them. + - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them. required: false + default: null version_added: 2.0 key: description: From 639902ff2081aa7f90e051878a3abf3f1a67eac4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 7 Jul 2015 10:53:09 -0700 Subject: [PATCH 124/157] Fix the documentation of route53_zone --- cloud/amazon/route53_zone.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/route53_zone.py b/cloud/amazon/route53_zone.py index 07a049b14f7..4630e00d4fa 100644 --- a/cloud/amazon/route53_zone.py +++ b/cloud/amazon/route53_zone.py @@ -23,7 +23,7 @@ version_added: "2.0" options: zone: description: - - The DNS zone record (eg: foo.com.) + - "The DNS zone record (eg: foo.com.)" required: true state: description: From 5fec1e3994b31e15daa2b0e7d936c2948da1a780 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= Date: Wed, 8 Jul 2015 12:56:45 +0200 Subject: [PATCH 125/157] irc: add version_added to new option nick --- notification/irc.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/notification/irc.py b/notification/irc.py index 1eae8ed8284..70f198883c7 100644 --- a/notification/irc.py +++ b/notification/irc.py @@ -42,6 +42,7 @@ options: - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting. required: false default: ansible + version_added: "2.0" msg: description: - The message body. @@ -52,7 +53,7 @@ options: - Set the channel topic required: false default: null - version_added: 2.0 + version_added: "2.0" color: description: - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). @@ -68,12 +69,12 @@ options: - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them. 
required: false default: null - version_added: 2.0 + version_added: "2.0" key: description: - Channel key required: false - version_added: 1.7 + version_added: "1.7" passwd: description: - Server password @@ -83,12 +84,12 @@ options: - Timeout to use while waiting for successful registration and join messages, this is to prevent an endless loop default: 30 - version_added: 1.5 + version_added: "1.5" use_ssl: description: - Designates whether TLS/SSL should be used when connecting to the IRC server default: False - version_added: 1.8 + version_added: "1.8" # informational: requirements for nodes requirements: [ socket ] From 13a3e38a1124f1e3e74a5e33706bc7615a44f73b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 13:13:12 -0400 Subject: [PATCH 126/157] make token no_log in slack plugin --- notification/slack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/slack.py b/notification/slack.py index baabe4f58d2..ba4ed2e4c2d 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -177,7 +177,7 @@ def main(): module = AnsibleModule( argument_spec = dict( domain = dict(type='str', required=False, default=None), - token = dict(type='str', required=True), + token = dict(type='str', required=True, no_log=True), msg = dict(type='str', required=True), channel = dict(type='str', default=None), username = dict(type='str', default='Ansible'), From 72b9ef4830ff82ab0d35858ab33cb547982e94e3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 13:14:01 -0400 Subject: [PATCH 127/157] added missing version_added to new filesystem option --- system/filesystem.py | 1 + 1 file changed, 1 insertion(+) diff --git a/system/filesystem.py b/system/filesystem.py index b1a75fc065a..b44168a0e06 100644 --- a/system/filesystem.py +++ b/system/filesystem.py @@ -47,6 +47,7 @@ options: description: - If yes, if the block device and filessytem size differ, grow the filesystem into the space. Note, XFS Will only grow if mounted. required: false + version_added: "2.0" opts: description: - List of options to be passed to mkfs command. From 795425f32defdd83d9b1017ab4a124eb73451e73 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Thu, 9 Jul 2015 09:43:26 +0200 Subject: [PATCH 128/157] irc: remove version_added for nick option, should have been nick_to option --- notification/irc.py | 1 - 1 file changed, 1 deletion(-) diff --git a/notification/irc.py b/notification/irc.py index 70f198883c7..7e34049c639 100644 --- a/notification/irc.py +++ b/notification/irc.py @@ -42,7 +42,6 @@ options: - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting. required: false default: ansible - version_added: "2.0" msg: description: - The message body. 
From 539fd2357bf15fa4a63cfc8183d972df752119bc Mon Sep 17 00:00:00 2001 From: Siva Popuri Date: Thu, 9 Jul 2015 09:42:28 -0500 Subject: [PATCH 129/157] Removed all of the clc-ansbile-modules and kept only clc_publicip as the first module to go --- cloud/centurylink/__init__.py | 2 +- cloud/centurylink/clc_aa_policy.py | 294 ----- cloud/centurylink/clc_alert_policy.py | 473 ------- cloud/centurylink/clc_blueprint_package.py | 263 ---- cloud/centurylink/clc_firewall_policy.py | 542 -------- cloud/centurylink/clc_group.py | 370 ------ cloud/centurylink/clc_loadbalancer.py | 759 ----------- cloud/centurylink/clc_modify_server.py | 710 ----------- cloud/centurylink/clc_server.py | 1323 -------------------- cloud/centurylink/clc_server_snapshot.py | 341 ----- 10 files changed, 1 insertion(+), 5076 deletions(-) delete mode 100644 cloud/centurylink/clc_aa_policy.py delete mode 100644 cloud/centurylink/clc_alert_policy.py delete mode 100644 cloud/centurylink/clc_blueprint_package.py delete mode 100644 cloud/centurylink/clc_firewall_policy.py delete mode 100644 cloud/centurylink/clc_group.py delete mode 100644 cloud/centurylink/clc_loadbalancer.py delete mode 100644 cloud/centurylink/clc_modify_server.py delete mode 100644 cloud/centurylink/clc_server.py delete mode 100644 cloud/centurylink/clc_server_snapshot.py diff --git a/cloud/centurylink/__init__.py b/cloud/centurylink/__init__.py index 71f0abcff9d..8b137891791 100644 --- a/cloud/centurylink/__init__.py +++ b/cloud/centurylink/__init__.py @@ -1 +1 @@ -__version__ = "${version}" + diff --git a/cloud/centurylink/clc_aa_policy.py b/cloud/centurylink/clc_aa_policy.py deleted file mode 100644 index 644f3817c4f..00000000000 --- a/cloud/centurylink/clc_aa_policy.py +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/python - -# CenturyLink Cloud Ansible Modules. -# -# These Ansible modules enable the CenturyLink Cloud v2 API to be called -# from an within Ansible Playbook. -# -# This file is part of CenturyLink Cloud, and is maintained -# by the Workflow as a Service Team -# -# Copyright 2015 CenturyLink Cloud -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# CenturyLink Cloud: http://www.CenturyLinkCloud.com -# API Documentation: https://www.centurylinkcloud.com/api-docs/v2/ - -DOCUMENTATION = ''' -module: clc_aa_policy -short_descirption: Create or Delete Anti Affinity Policies at CenturyLink Cloud. -description: - - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud. -options: - name: - description: - - The name of the Anti Affinity Policy. - required: True - location: - description: - - Datacenter in which the policy lives/should live. - required: True - state: - description: - - Whether to create or delete the policy. - required: False - default: present - choices: ['present','absent'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. 
- default: True - required: False - choices: [ True, False] - aliases: [] -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - ---- -- name: Create AA Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create an Anti Affinity Policy - clc_aa_policy: - name: 'Hammer Time' - location: 'UK3' - state: present - register: policy - - - name: debug - debug: var=policy - ---- -- name: Delete AA Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete an Anti Affinity Policy - clc_aa_policy: - name: 'Hammer Time' - location: 'UK3' - state: absent - register: policy - - - name: debug - debug: var=policy -''' - -__version__ = '${version}' - -import requests - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - clc_found = False - clc_sdk = None -else: - clc_found = True - - -class ClcAntiAffinityPolicy(): - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - self.policy_dict = {} - - if not clc_found: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - location=dict(required=True), - alias=dict(default=None), - wait=dict(default=True), - state=dict(default='present', choices=['present', 'absent']), - ) - return argument_spec - - # Module Behavior Goodness - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - if not clc_found: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - self._set_clc_credentials_from_env() - self.policy_dict = self._get_policies_for_datacenter(p) - - if p['state'] == "absent": - changed, policy = self._ensure_policy_is_absent(p) - else: - changed, policy = self._ensure_policy_is_present(p) - - if hasattr(policy, 'data'): - policy = policy.data - elif hasattr(policy, '__dict__'): - policy = policy.__dict__ - - self.module.exit_json(changed=changed, policy=policy) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_policies_for_datacenter(self, p): - """ - Get the Policies for a datacenter by calling the CLC API. 
- :param p: datacenter to get policies from - :return: policies in the datacenter - """ - response = {} - - policies = self.clc.v2.AntiAffinity.GetAll(location=p['location']) - - for policy in policies: - response[policy.name] = policy - return response - - def _create_policy(self, p): - """ - Create an Anti Affinnity Policy using the CLC API. - :param p: datacenter to create policy in - :return: response dictionary from the CLC API. - """ - return self.clc.v2.AntiAffinity.Create( - name=p['name'], - location=p['location']) - - def _delete_policy(self, p): - """ - Delete an Anti Affinity Policy using the CLC API. - :param p: datacenter to delete a policy from - :return: none - """ - policy = self.policy_dict[p['name']] - policy.Delete() - - def _policy_exists(self, policy_name): - """ - Check to see if an Anti Affinity Policy exists - :param policy_name: name of the policy - :return: boolean of if the policy exists - """ - if policy_name in self.policy_dict: - return self.policy_dict.get(policy_name) - - return False - - def _ensure_policy_is_absent(self, p): - """ - Makes sure that a policy is absent - :param p: dictionary of policy name - :return: tuple of if a deletion occurred and the name of the policy that was deleted - """ - changed = False - if self._policy_exists(policy_name=p['name']): - changed = True - if not self.module.check_mode: - self._delete_policy(p) - return changed, None - - def _ensure_policy_is_present(self, p): - """ - Ensures that a policy is present - :param p: dictonary of a policy name - :return: tuple of if an addition occurred and the name of the policy that was added - """ - changed = False - policy = self._policy_exists(policy_name=p['name']) - if not policy: - changed = True - policy = None - if not self.module.check_mode: - policy = self._create_policy(p) - return changed, policy - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule( - argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(), - supports_check_mode=True) - clc_aa_policy = ClcAntiAffinityPolicy(module) - clc_aa_policy.process_request() - -from ansible.module_utils.basic import * # pylint: disable=W0614 -if __name__ == '__main__': - main() diff --git a/cloud/centurylink/clc_alert_policy.py b/cloud/centurylink/clc_alert_policy.py deleted file mode 100644 index 75467967a85..00000000000 --- a/cloud/centurylink/clc_alert_policy.py +++ /dev/null @@ -1,473 +0,0 @@ -#!/usr/bin/python - -# CenturyLink Cloud Ansible Modules. -# -# These Ansible modules enable the CenturyLink Cloud v2 API to be called -# from an within Ansible Playbook. -# -# This file is part of CenturyLink Cloud, and is maintained -# by the Workflow as a Service Team -# -# Copyright 2015 CenturyLink Cloud -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -# CenturyLink Cloud: http://www.CenturyLinkCloud.com -# API Documentation: https://www.centurylinkcloud.com/api-docs/v2/ - -DOCUMENTATION = ''' -module: clc_alert_policy -short_descirption: Create or Delete Alert Policies at CenturyLink Cloud. -description: - - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud. -options: - alias: - description: - - The alias of your CLC Account - required: True - name: - description: - - The name of the alert policy. This is mutually exclusive with id - default: None - aliases: [] - id: - description: - - The alert policy id. This is mutually exclusive with name - default: None - aliases: [] - alert_recipients: - description: - - A list of recipient email ids to notify the alert. - required: True - aliases: [] - metric: - description: - - The metric on which to measure the condition that will trigger the alert. - required: True - default: None - choices: ['cpu','memory','disk'] - aliases: [] - duration: - description: - - The length of time in minutes that the condition must exceed the threshold. - required: True - default: None - aliases: [] - threshold: - description: - - The threshold that will trigger the alert when the metric equals or exceeds it. - This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0 - required: True - default: None - aliases: [] - state: - description: - - Whether to create or delete the policy. - required: False - default: present - choices: ['present','absent'] -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - ---- -- name: Create Alert Policy Example - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create an Alert Policy for disk above 80% for 5 minutes - clc_alert_policy: - alias: wfad - name: 'alert for disk > 80%' - alert_recipients: - - test1@centurylink.com - - test2@centurylink.com - metric: 'disk' - duration: '00:05:00' - threshold: 80 - state: present - register: policy - - - name: debug - debug: var=policy - ---- -- name: Delete Alert Policy Example - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete an Alert Policy - clc_alert_policy: - alias: wfad - name: 'alert for disk > 80%' - state: absent - register: policy - - - name: debug - debug: var=policy -''' - -__version__ = '${version}' - -import requests - -# -# Requires the clc-python-sdk. 
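# Editor's note: the threshold option documented above is a percentage between 5.0 and
# 95.0 in multiples of 5.0. The argument spec does not appear to enforce that, so the
# check below is purely illustrative (a hypothetical helper, not part of clc_alert_policy):
def valid_threshold(value):
    """True when value lies in 5.0-95.0 and is a multiple of 5.0."""
    value = float(value)
    return 5.0 <= value <= 95.0 and value % 5.0 == 0.0

assert valid_threshold(80) and not valid_threshold(82.5)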
-# sudo pip install clc-sdk -# -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - clc_found = False - clc_sdk = None -else: - clc_found = True - - -class ClcAlertPolicy(): - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - self.policy_dict = {} - - if not clc_found: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(default=None), - id=dict(default=None), - alias=dict(required=True, default=None), - alert_recipients=dict(type='list', required=False, default=None), - metric=dict(required=False, choices=['cpu', 'memory', 'disk'], default=None), - duration=dict(required=False, type='str', default=None), - threshold=dict(required=False, type='int', default=None), - state=dict(default='present', choices=['present', 'absent']) - ) - mutually_exclusive = [ - ['name', 'id'] - ] - return {'argument_spec': argument_spec, - 'mutually_exclusive': mutually_exclusive} - - # Module Behavior Goodness - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - if not clc_found: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - self._set_clc_credentials_from_env() - self.policy_dict = self._get_alert_policies(p['alias']) - - if p['state'] == 'present': - changed, policy = self._ensure_alert_policy_is_present() - else: - changed, policy = self._ensure_alert_policy_is_absent() - - self.module.exit_json(changed=changed, policy=policy) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_alert_policy_is_present(self): - """ - Ensures that the alert policy is present - :return: (changed, policy) - canged: A flag representing if anything is modified - policy: the created/updated alert policy - """ - changed = False - p = self.module.params - policy_name = p.get('name') - alias = p.get('alias') - if not policy_name: - self.module.fail_json(msg='Policy name is a required') - policy = self._alert_policy_exists(alias, policy_name) - if not policy: - changed = True - policy = None - if not self.module.check_mode: - policy = self._create_alert_policy() - else: - changed_u, policy = self._ensure_alert_policy_is_updated(policy) - if changed_u: - changed = True - return changed, policy - - def _ensure_alert_policy_is_absent(self): - """ - Ensures that the alert policy is absent - :return: (changed, None) - canged: A 
flag representing if anything is modified - """ - changed = False - p = self.module.params - alert_policy_id = p.get('id') - alert_policy_name = p.get('name') - alias = p.get('alias') - if not alert_policy_id and not alert_policy_name: - self.module.fail_json( - msg='Either alert policy id or policy name is required') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id( - self.module, - alert_policy_name) - if alert_policy_id and alert_policy_id in self.policy_dict: - changed = True - if not self.module.check_mode: - self._delete_alert_policy(alias, alert_policy_id) - return changed, None - - def _ensure_alert_policy_is_updated(self, alert_policy): - """ - Ensures the aliert policy is updated if anything is changed in the alert policy configuration - :param alert_policy: the targetalert policy - :return: (changed, policy) - canged: A flag representing if anything is modified - policy: the updated the alert policy - """ - changed = False - p = self.module.params - alert_policy_id = alert_policy.get('id') - email_list = p.get('alert_recipients') - metric = p.get('metric') - duration = p.get('duration') - threshold = p.get('threshold') - policy = alert_policy - if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \ - (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \ - (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))): - changed = True - elif email_list: - t_email_list = list( - alert_policy.get('actions')[0].get('settings').get('recipients')) - if set(email_list) != set(t_email_list): - changed = True - if changed and not self.module.check_mode: - policy = self._update_alert_policy(alert_policy_id) - return changed, policy - - def _get_alert_policies(self, alias): - """ - Get the alert policies for account alias by calling the CLC API. - :param alias: the account alias - :return: the alert policies for the account alias - """ - response = {} - - policies = self.clc.v2.API.Call('GET', - '/v2/alertPolicies/%s' - % (alias)) - - for policy in policies.get('items'): - response[policy.get('id')] = policy - return response - - def _create_alert_policy(self): - """ - Create an alert Policy using the CLC API. - :return: response dictionary from the CLC API. - """ - p = self.module.params - alias = p['alias'] - email_list = p['alert_recipients'] - metric = p['metric'] - duration = p['duration'] - threshold = p['threshold'] - name = p['name'] - arguments = json.dumps( - { - 'name': name, - 'actions': [{ - 'action': 'email', - 'settings': { - 'recipients': email_list - } - }], - 'triggers': [{ - 'metric': metric, - 'duration': duration, - 'threshold': threshold - }] - } - ) - try: - result = self.clc.v2.API.Call( - 'POST', - '/v2/alertPolicies/%s' % - (alias), - arguments) - except self.clc.APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to create alert policy. %s' % str( - e.response_text)) - return result - - def _update_alert_policy(self, alert_policy_id): - """ - Update alert policy using the CLC API. - :param alert_policy_id: The clc alert policy id - :return: response dictionary from the CLC API. 
- """ - p = self.module.params - alias = p['alias'] - email_list = p['alert_recipients'] - metric = p['metric'] - duration = p['duration'] - threshold = p['threshold'] - name = p['name'] - arguments = json.dumps( - { - 'name': name, - 'actions': [{ - 'action': 'email', - 'settings': { - 'recipients': email_list - } - }], - 'triggers': [{ - 'metric': metric, - 'duration': duration, - 'threshold': threshold - }] - } - ) - try: - result = self.clc.v2.API.Call( - 'PUT', '/v2/alertPolicies/%s/%s' % - (alias, alert_policy_id), arguments) - except self.clc.APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to update alert policy. %s' % str( - e.response_text)) - return result - - def _delete_alert_policy(self, alias, policy_id): - """ - Delete an alert policy using the CLC API. - :param alias : the account alias - :param policy_id: the alert policy id - :return: response dictionary from the CLC API. - """ - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/alertPolicies/%s/%s' % - (alias, policy_id), None) - except self.clc.APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to delete alert policy. %s' % str( - e.response_text)) - return result - - def _alert_policy_exists(self, alias, policy_name): - """ - Check to see if an alert policy exists - :param policy_name: name of the alert policy - :return: boolean of if the policy exists - """ - result = False - for id in self.policy_dict: - if self.policy_dict.get(id).get('name') == policy_name: - result = self.policy_dict.get(id) - return result - - def _get_alert_policy_id(self, module, alert_policy_name): - """ - retrieves the alert policy id of the account based on the name of the policy - :param module: the AnsibleModule object - :param alert_policy_name: the alert policy name - :return: alert_policy_id: The alert policy id - """ - alert_policy_id = None - for id in self.policy_dict: - if self.policy_dict.get(id).get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = id - else: - return module.fail_json( - msg='mutiple alert policies were found with policy name : %s' % - (alert_policy_name)) - return alert_policy_id - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - argument_dict = ClcAlertPolicy._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_alert_policy = ClcAlertPolicy(module) - clc_alert_policy.process_request() - -from ansible.module_utils.basic import * # pylint: disable=W0614 -if __name__ == '__main__': - main() diff --git a/cloud/centurylink/clc_blueprint_package.py b/cloud/centurylink/clc_blueprint_package.py deleted file mode 100644 index 80cc18a24ca..00000000000 --- a/cloud/centurylink/clc_blueprint_package.py +++ /dev/null @@ -1,263 +0,0 @@ -#!/usr/bin/python - -# CenturyLink Cloud Ansible Modules. -# -# These Ansible modules enable the CenturyLink Cloud v2 API to be called -# from an within Ansible Playbook. 
-# -# This file is part of CenturyLink Cloud, and is maintained -# by the Workflow as a Service Team -# -# Copyright 2015 CenturyLink Cloud -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# CenturyLink Cloud: http://www.CenturyLinkCloud.com -# API Documentation: https://www.centurylinkcloud.com/api-docs/v2/ -# - -DOCUMENTATION = ''' -module: clc_blueprint_package -short_desciption: deploys a blue print package on a set of servers in CenturyLink Cloud. -description: - - An Ansible module to deploy blue print package on a set of servers in CenturyLink Cloud. -options: - server_ids: - description: - - A list of server Ids to deploy the blue print package. - default: [] - required: True - aliases: [] - package_id: - description: - - The package id of the blue print. - default: None - required: True - aliases: [] - package_params: - description: - - The dictionary of arguments required to deploy the blue print. - default: {} - required: False - aliases: [] -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Deploy package - clc_blueprint_package: - server_ids: - - UC1WFSDANS01 - - UC1WFSDANS02 - package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a - package_params: {} -''' - -__version__ = '${version}' - -import requests - -# -# Requires the clc-python-sdk. 
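# Editor's note: the blueprint-package module documented above fans a single package out
# to every server in server_ids. A toy sketch of that fan-out; queue_install is a
# stand-in for the real SDK call that appears further down in this file:
def fan_out(server_ids, package_id, package_params, queue_install):
    """Queue one install request per server and report whether anything changed."""
    requests_lst = [queue_install(sid, package_id, package_params) for sid in server_ids]
    return bool(requests_lst), server_ids, requests_lst

changed, ids, reqs = fan_out(['UC1WFSDANS01', 'UC1WFSDANS02'],
                             '77abb844-579d-478d-3955-c69ab4a7ba1a', {},
                             lambda sid, pid, params: {'server': sid, 'package': pid})
assert changed and len(reqs) == 2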
-# sudo pip install clc-sdk -# -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - - -class ClcBlueprintPackage(): - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - if not CLC_FOUND: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - if not CLC_FOUND: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - self._set_clc_credentials_from_env() - - server_ids = p['server_ids'] - package_id = p['package_id'] - package_params = p['package_params'] - state = p['state'] - if state == 'present': - changed, changed_server_ids, requests = self.ensure_package_installed( - server_ids, package_id, package_params) - if not self.module.check_mode: - self._wait_for_requests_to_complete(requests) - self.module.exit_json(changed=changed, server_ids=changed_server_ids) - - @staticmethod - def define_argument_spec(): - """ - This function defnines the dictionary object required for - package module - :return: the package dictionary object - """ - argument_spec = dict( - server_ids=dict(type='list', required=True), - package_id=dict(required=True), - package_params=dict(type='dict', default={}), - wait=dict(default=True), - state=dict(default='present', choices=['present']) - ) - return argument_spec - - def ensure_package_installed(self, server_ids, package_id, package_params): - """ - Ensure the package is installed in the given list of servers - :param server_ids: the server list where the package needs to be installed - :param package_id: the package id - :param package_params: the package arguments - :return: (changed, server_ids) - changed: A flag indicating if a change was made - server_ids: The list of servers modfied - """ - changed = False - requests = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to get servers from CLC') - try: - for server in servers: - request = self.clc_install_package( - server, - package_id, - package_params) - requests.append(request) - changed = True - except CLCException as ex: - self.module.fail_json( - msg='Failed while installing package : %s with Error : %s' % - (package_id, ex)) - return changed, server_ids, requests - - def clc_install_package(self, server, package_id, package_params): - """ - Read all servers from CLC and executes each package from package_list - :param server_list: The target list of servers where the packages needs to be installed - :param package_list: The list of packages to be installed - :return: (changed, server_ids) - changed: A flag indicating if a change was made - server_ids: The list of servers modfied - """ - result = None - if not self.module.check_mode: - result = server.ExecutePackage( - package_id=package_id, - parameters=package_params) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to 
process package install request') - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param the list server ids - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - self.module.fail_json(msg=message + ': %s' % ex) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - -def main(): - """ - Main function - :return: None - """ - module = AnsibleModule( - argument_spec=ClcBlueprintPackage.define_argument_spec(), - supports_check_mode=True - ) - clc_blueprint_package = ClcBlueprintPackage(module) - clc_blueprint_package.process_request() - -from ansible.module_utils.basic import * -if __name__ == '__main__': - main() diff --git a/cloud/centurylink/clc_firewall_policy.py b/cloud/centurylink/clc_firewall_policy.py deleted file mode 100644 index 260c82bc885..00000000000 --- a/cloud/centurylink/clc_firewall_policy.py +++ /dev/null @@ -1,542 +0,0 @@ -#!/usr/bin/python - -# CenturyLink Cloud Ansible Modules. -# -# These Ansible modules enable the CenturyLink Cloud v2 API to be called -# from an within Ansible Playbook. -# -# This file is part of CenturyLink Cloud, and is maintained -# by the Workflow as a Service Team -# -# Copyright 2015 CenturyLink Cloud -# -# Licensed under the Apache License, Version 2.0 (the "License"); - -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
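# Editor's note: _wait_for_requests_to_complete above blocks on every queued CLC request
# and fails the module if any sub-request did not succeed. The same control flow with the
# SDK stubbed out (the Fake* classes below are test stand-ins, not module code):
class FakeSubRequest(object):
    def __init__(self, status):
        self._status = status
    def Status(self):
        return self._status

class FakeRequest(object):
    def __init__(self, statuses):
        self.requests = [FakeSubRequest(s) for s in statuses]
    def WaitUntilComplete(self):
        pass

def all_requests_succeeded(requests_lst):
    """Wait on each request, then check that every sub-request reports 'succeeded'."""
    for request in requests_lst:
        request.WaitUntilComplete()
        if any(r.Status() != 'succeeded' for r in request.requests):
            return False
    return True

assert all_requests_succeeded([FakeRequest(['succeeded'])])
assert not all_requests_succeeded([FakeRequest(['succeeded', 'failed'])])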
-# -# CenturyLink Cloud: http://www.CenturyLinkCloud.com -# API Documentation: https://www.centurylinkcloud.com/api-docs/v2/ -# - -DOCUMENTATION = ''' -module: clc_firewall_policy -short_desciption: Create/delete/update firewall policies -description: - - Create or delete or updated firewall polices on Centurylink Centurylink Cloud -options: - location: - description: - - Target datacenter for the firewall policy - default: None - required: True - aliases: [] - state: - description: - - Whether to create or delete the firewall policy - default: present - required: True - choices: ['present', 'absent'] - aliases: [] - source: - description: - - Source addresses for traffic on the originating firewall - default: None - required: For Creation - aliases: [] - destination: - description: - - Destination addresses for traffic on the terminating firewall - default: None - required: For Creation - aliases: [] - ports: - description: - - types of ports associated with the policy. TCP & UDP can take in single ports or port ranges. - default: None - required: False - choices: ['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'] - aliases: [] - firewall_policy_id: - description: - - Id of the firewall policy - default: None - required: False - aliases: [] - source_account_alias: - description: - - CLC alias for the source account - default: None - required: True - aliases: [] - destination_account_alias: - description: - - CLC alias for the destination account - default: None - required: False - aliases: [] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - default: True - required: False - choices: [ True, False ] - aliases: [] - enabled: - description: - - If the firewall policy is enabled or disabled - default: true - required: False - choices: [ true, false ] - aliases: [] - -''' - -EXAMPLES = ''' ---- -- name: Create Firewall Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create / Verify an Firewall Policy at CenturyLink Cloud - clc_firewall: - source_account_alias: WFAD - location: VA1 - state: present - source: 10.128.216.0/24 - destination: 10.128.216.0/24 - ports: Any - destination_account_alias: WFAD - ---- -- name: Delete Firewall Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete an Firewall Policy at CenturyLink Cloud - clc_firewall: - source_account_alias: WFAD - location: VA1 - state: present - firewall_policy_id: c62105233d7a4231bd2e91b9c791eaae -''' - -__version__ = '${version}' - -import urlparse -from time import sleep -import requests - -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - - -class ClcFirewallPolicy(): - - clc = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.firewall_dict = {} - - if not CLC_FOUND: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - location=dict(required=True, defualt=None), - source_account_alias=dict(required=True, default=None), - destination_account_alias=dict(default=None), - firewall_policy_id=dict(default=None), - ports=dict(default=None, type='list'), - source=dict(defualt=None, type='list'), - 
destination=dict(defualt=None, type='list'), - wait=dict(default=True), - state=dict(default='present', choices=['present', 'absent']), - enabled=dict(defualt=None) - ) - return argument_spec - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - location = self.module.params.get('location') - source_account_alias = self.module.params.get('source_account_alias') - destination_account_alias = self.module.params.get( - 'destination_account_alias') - firewall_policy_id = self.module.params.get('firewall_policy_id') - ports = self.module.params.get('ports') - source = self.module.params.get('source') - destination = self.module.params.get('destination') - wait = self.module.params.get('wait') - state = self.module.params.get('state') - enabled = self.module.params.get('enabled') - - self.firewall_dict = { - 'location': location, - 'source_account_alias': source_account_alias, - 'destination_account_alias': destination_account_alias, - 'firewall_policy_id': firewall_policy_id, - 'ports': ports, - 'source': source, - 'destination': destination, - 'wait': wait, - 'state': state, - 'enabled': enabled} - - self._set_clc_credentials_from_env() - requests = [] - - if state == 'absent': - changed, firewall_policy_id, response = self._ensure_firewall_policy_is_absent( - source_account_alias, location, self.firewall_dict) - - elif state == 'present': - changed, firewall_policy_id, response = self._ensure_firewall_policy_is_present( - source_account_alias, location, self.firewall_dict) - else: - return self.module.fail_json(msg="Unknown State: " + state) - - return self.module.exit_json( - changed=changed, - firewall_policy_id=firewall_policy_id) - - @staticmethod - def _get_policy_id_from_response(response): - """ - Method to parse out the policy id from creation response - :param response: response from firewall creation control - :return: policy_id: firewall policy id from creation call - """ - url = response.get('links')[0]['href'] - path = urlparse.urlparse(url).path - path_list = os.path.split(path) - policy_id = path_list[-1] - return policy_id - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_firewall_policy_is_present( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is present - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: dictionary or request parameters for firewall policy creation - :return: (changed, firewall_policy, response) - changed: flag for if a change occurred - firewall_policy: policy that was changed - response: response from 
CLC API call - """ - changed = False - response = {} - firewall_policy_id = firewall_dict.get('firewall_policy_id') - - if firewall_policy_id is None: - if not self.module.check_mode: - response = self._create_firewall_policy( - source_account_alias, - location, - firewall_dict) - firewall_policy_id = self._get_policy_id_from_response( - response) - self._wait_for_requests_to_complete( - firewall_dict.get('wait'), - source_account_alias, - location, - firewall_policy_id) - changed = True - else: - get_before_response, success = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if not success: - return self.module.fail_json( - msg='Unable to find the firewall policy id : %s' % - firewall_policy_id) - changed = self._compare_get_request_with_dict( - get_before_response, - firewall_dict) - if not self.module.check_mode and changed: - response = self._update_firewall_policy( - source_account_alias, - location, - firewall_policy_id, - firewall_dict) - self._wait_for_requests_to_complete( - firewall_dict.get('wait'), - source_account_alias, - location, - firewall_policy_id) - return changed, firewall_policy_id, response - - def _ensure_firewall_policy_is_absent( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is removed if present - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: firewall policy to delete - :return: (changed, firewall_policy_id, response) - changed: flag for if a change occurred - firewall_policy_id: policy that was changed - response: response from CLC API call - """ - changed = False - response = [] - firewall_policy_id = firewall_dict.get('firewall_policy_id') - result, success = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if success: - if not self.module.check_mode: - response = self._delete_firewall_policy( - source_account_alias, - location, - firewall_policy_id) - changed = True - return changed, firewall_policy_id, response - - def _create_firewall_policy( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is present - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: dictionary or request parameters for firewall policy creation - :return: response from CLC API call - """ - payload = { - 'destinationAccount': firewall_dict.get('destination_account_alias'), - 'source': firewall_dict.get('source'), - 'destination': firewall_dict.get('destination'), - 'ports': firewall_dict.get('ports')} - try: - response = self.clc.v2.API.Call( - 'POST', '/v2-experimental/firewallPolicies/%s/%s' % - (source_account_alias, location), payload) - except self.clc.APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to successfully create firewall policy. 
%s" % - str(e.response_text)) - return response - - def _delete_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id): - """ - Deletes a given firewall policy for an account alias in a datacenter - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: firewall policy to delete - :return: response: response from CLC API call - """ - try: - response = self.clc.v2.API.Call( - 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, location, firewall_policy_id)) - except self.clc.APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to successfully delete firewall policy. %s" % - str(e.response_text)) - return response - - def _update_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id, - firewall_dict): - """ - Updates a firewall policy for a given datacenter and account alias - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: firewall policy to delete - :param firewall_dict: dictionary or request parameters for firewall policy creation - :return: response: response from CLC API call - """ - try: - response = self.clc.v2.API.Call( - 'PUT', - '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, - location, - firewall_policy_id), - firewall_dict) - except self.clc.APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to successfully update firewall policy. %s" % - str(e.response_text)) - return response - - @staticmethod - def _compare_get_request_with_dict(response, firewall_dict): - """ - Helper method to compare the json response for getting the firewall policy with the request parameters - :param response: response from the get method - :param firewall_dict: dictionary or request parameters for firewall policy creation - :return: changed: Boolean that returns true if there are differences between the response parameters and the playbook parameters - """ - - changed = False - - response_dest_account_alias = response.get('destinationAccount') - response_enabled = response.get('enabled') - response_source = response.get('source') - response_dest = response.get('destination') - response_ports = response.get('ports') - - request_dest_account_alias = firewall_dict.get( - 'destination_account_alias') - request_enabled = firewall_dict.get('enabled') - if request_enabled is None: - request_enabled = True - request_source = firewall_dict.get('source') - request_dest = firewall_dict.get('destination') - request_ports = firewall_dict.get('ports') - - if ( - response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or ( - response_enabled != request_enabled) or ( - response_source and response_source != request_source) or ( - response_dest and response_dest != request_dest) or ( - response_ports and response_ports != request_ports): - changed = True - return changed - - def _get_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id): - """ - Get back details for a particular firewall policy - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: id of the firewall policy to get - :return: response from CLC API call - """ - response = [] - success = False - try: - response = 
self.clc.v2.API.Call( - 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, location, firewall_policy_id)) - success = True - except: - pass - return response, success - - def _wait_for_requests_to_complete( - self, - wait, - source_account_alias, - location, - firewall_policy_id): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if wait: - response, success = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if response.get('status') == 'pending': - sleep(2) - self._wait_for_requests_to_complete( - wait, - source_account_alias, - location, - firewall_policy_id) - return None - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule( - argument_spec=ClcFirewallPolicy._define_module_argument_spec(), - supports_check_mode=True) - - clc_firewall = ClcFirewallPolicy(module) - clc_firewall.process_request() - -from ansible.module_utils.basic import * # pylint: disable=W0614 -if __name__ == '__main__': - main() diff --git a/cloud/centurylink/clc_group.py b/cloud/centurylink/clc_group.py deleted file mode 100644 index a4fd976d429..00000000000 --- a/cloud/centurylink/clc_group.py +++ /dev/null @@ -1,370 +0,0 @@ -#!/usr/bin/python - -# CenturyLink Cloud Ansible Modules. -# -# These Ansible modules enable the CenturyLink Cloud v2 API to be called -# from an within Ansible Playbook. -# -# This file is part of CenturyLink Cloud, and is maintained -# by the Workflow as a Service Team -# -# Copyright 2015 CenturyLink Cloud -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
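# Editor's note: the firewall module above polls the policy until its status leaves
# 'pending', sleeping two seconds between reads. An equivalent iterative sketch;
# get_policy is a stand-in for _get_firewall_policy:
from time import sleep

def wait_until_not_pending(get_policy, interval=2):
    """Poll get_policy() until its 'status' field is no longer 'pending'."""
    while True:
        policy = get_policy()
        if policy.get('status') != 'pending':
            return policy
        sleep(interval)

# e.g. wait_until_not_pending(lambda: {'status': 'active'}) returns immediately.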
-# -# CenturyLink Cloud: http://www.CenturyLinkCloud.com -# API Documentation: https://www.centurylinkcloud.com/api-docs/v2/ -# - -DOCUMENTATION = ''' -module: clc_group -short_desciption: Create/delete Server Groups at Centurylink Cloud -description: - - Create or delete Server Groups at Centurylink Centurylink Cloud -options: - name: - description: - - The name of the Server Group - description: - description: - - A description of the Server Group - parent: - description: - - The parent group of the server group - location: - description: - - Datacenter to create the group in - state: - description: - - Whether to create or delete the group - default: present - choices: ['present', 'absent'] - -''' - -EXAMPLES = ''' - -# Create a Server Group - ---- -- name: Create Server Group - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create / Verify a Server Group at CenturyLink Cloud - clc_group: - name: 'My Cool Server Group' - parent: 'Default Group' - state: present - register: clc - - - name: debug - debug: var=clc - -# Delete a Server Group - ---- -- name: Delete Server Group - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete / Verify Absent a Server Group at CenturyLink Cloud - clc_group: - name: 'My Cool Server Group' - parent: 'Default Group' - state: absent - register: clc - - - name: debug - debug: var=clc - -''' - -__version__ = '${version}' - -import requests - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - - -class ClcGroup(object): - - clc = None - root_group = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - location = self.module.params.get('location') - group_name = self.module.params.get('name') - parent_name = self.module.params.get('parent') - group_description = self.module.params.get('description') - state = self.module.params.get('state') - - self._set_clc_credentials_from_env() - self.group_dict = self._get_group_tree_for_datacenter( - datacenter=location) - - if state == "absent": - changed, group, response = self._ensure_group_is_absent( - group_name=group_name, parent_name=parent_name) - - else: - changed, group, response = self._ensure_group_is_present( - group_name=group_name, parent_name=parent_name, group_description=group_description) - - - self.module.exit_json(changed=changed, group=group_name) - - # - # Functions to define the Ansible module and its arguments - # - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - description=dict(default=None), - parent=dict(default=None), - location=dict(default=None), - alias=dict(default=None), - custom_fields=dict(type='list', default=[]), - server_ids=dict(type='list', default=[]), - state=dict(default='present', choices=['present', 'absent'])) - - return argument_spec - - # - # Module Behavior Functions - # - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading 
environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_group_is_absent(self, group_name, parent_name): - """ - Ensure that group_name is absent by deleting it if necessary - :param group_name: string - the name of the clc server group to delete - :param parent_name: string - the name of the parent group for group_name - :return: changed, group - """ - changed = False - group = [] - results = [] - - if self._group_exists(group_name=group_name, parent_name=parent_name): - if not self.module.check_mode: - group.append(group_name) - for g in group: - result = self._delete_group(group_name) - results.append(result) - changed = True - return changed, group, results - - def _delete_group(self, group_name): - """ - Delete the provided server group - :param group_name: string - the server group to delete - :return: none - """ - group, parent = self.group_dict.get(group_name) - response = group.Delete() - return response - - def _ensure_group_is_present( - self, - group_name, - parent_name, - group_description): - """ - Checks to see if a server group exists, creates it if it doesn't. 
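# Editor's note: a compact sketch of the decision this method makes - reuse the group
# when it already exists under the parent, create it when only the parent exists, and
# fail when the parent itself is missing. Names below are illustrative, not module code:
def plan_group_action(parent_exists, child_exists):
    if not parent_exists:
        return 'fail: parent group does not exist'
    return 'no-op' if child_exists else 'create'

assert plan_group_action(True, True) == 'no-op'
assert plan_group_action(True, False) == 'create'
assert plan_group_action(False, False).startswith('fail')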
- :param group_name: the name of the group to validate/create - :param parent_name: the name of the parent group for group_name - :param group_description: a short description of the server group (used when creating) - :return: (changed, group) - - changed: Boolean- whether a change was made, - group: A clc group object for the group - """ - assert self.root_group, "Implementation Error: Root Group not set" - parent = parent_name if parent_name is not None else self.root_group.name - description = group_description - changed = False - results = [] - groups = [] - group = group_name - - parent_exists = self._group_exists(group_name=parent, parent_name=None) - child_exists = self._group_exists(group_name=group_name, parent_name=parent) - - if parent_exists and child_exists: - group, parent = self.group_dict[group_name] - changed = False - elif parent_exists and not child_exists: - if not self.module.check_mode: - groups.append(group_name) - for g in groups: - group = self._create_group( - group=group, - parent=parent, - description=description) - results.append(group) - changed = True - else: - self.module.fail_json( - msg="parent group: " + - parent + - " does not exist") - - return changed, group, results - - def _create_group(self, group, parent, description): - """ - Create the provided server group - :param group: clc_sdk.Group - the group to create - :param parent: clc_sdk.Parent - the parent group for {group} - :param description: string - a text description of the group - :return: clc_sdk.Group - the created group - """ - - (parent, grandparent) = self.group_dict[parent] - return parent.Create(name=group, description=description) - - # - # Utility Functions - # - - def _group_exists(self, group_name, parent_name): - """ - Check to see if a group exists - :param group_name: string - the group to check - :param parent_name: string - the parent of group_name - :return: boolean - whether the group exists - """ - result = False - if group_name in self.group_dict: - (group, parent) = self.group_dict[group_name] - if parent_name is None or parent_name == parent.name: - result = True - return result - - def _get_group_tree_for_datacenter(self, datacenter=None, alias=None): - """ - Walk the tree of groups for a datacenter - :param datacenter: string - the datacenter to walk (ex: 'UC1') - :param alias: string - the account alias to search. 
Defaults to the current user's account - :return: a dictionary of groups and parents - """ - self.root_group = self.clc.v2.Datacenter( - location=datacenter).RootGroup() - return self._walk_groups_recursive( - parent_group=None, - child_group=self.root_group) - - def _walk_groups_recursive(self, parent_group, child_group): - """ - Walk a parent-child tree of groups, starting with the provided child group - :param parent_group: clc_sdk.Group - the parent group to start the walk - :param child_group: clc_sdk.Group - the child group to start the walk - :return: a dictionary of groups and parents - """ - result = {str(child_group): (child_group, parent_group)} - groups = child_group.Subgroups().groups - if len(groups) > 0: - for group in groups: - if group.type != 'default': - continue - - result.update(self._walk_groups_recursive(child_group, group)) - return result - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule(argument_spec=ClcGroup._define_module_argument_spec(), supports_check_mode=True) - - clc_group = ClcGroup(module) - clc_group.process_request() - -from ansible.module_utils.basic import * # pylint: disable=W0614 -if __name__ == '__main__': - main() diff --git a/cloud/centurylink/clc_loadbalancer.py b/cloud/centurylink/clc_loadbalancer.py deleted file mode 100644 index 058954c687b..00000000000 --- a/cloud/centurylink/clc_loadbalancer.py +++ /dev/null @@ -1,759 +0,0 @@ -#!/usr/bin/python - -# CenturyLink Cloud Ansible Modules. -# -# These Ansible modules enable the CenturyLink Cloud v2 API to be called -# from an within Ansible Playbook. -# -# This file is part of CenturyLink Cloud, and is maintained -# by the Workflow as a Service Team -# -# Copyright 2015 CenturyLink Cloud -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# CenturyLink Cloud: http://www.CenturyLinkCloud.com -# API Documentation: https://www.centurylinkcloud.com/api-docs/v2/ -# - -DOCUMENTATION = ''' -module: -short_desciption: Create, Delete shared loadbalancers in CenturyLink Cloud. -description: - - An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud. 
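# Editor's note: _walk_groups_recursive above flattens the datacenter's group tree into
# a dict of name -> (group, parent). The same traversal over plain dicts, with
# 'name'/'children' keys standing in for the SDK group objects:
def walk_groups(parent, child):
    """Flatten a parent/child tree into {name: (node, parent)}."""
    result = {child['name']: (child, parent)}
    for group in child.get('children', []):
        result.update(walk_groups(child, group))
    return result

tree = {'name': 'root', 'children': [{'name': 'Default Group', 'children': []}]}
assert sorted(walk_groups(None, tree)) == ['Default Group', 'root']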
-options: -options: - name: - description: - - The name of the loadbalancer - required: True - description: - description: - - A description for your loadbalancer - alias: - description: - - The alias of your CLC Account - required: True - location: - description: - - The location of the datacenter your load balancer resides in - required: True - method: - description: - -The balancing method for this pool - default: roundRobin - choices: ['sticky', 'roundRobin'] - persistence: - description: - - The persistence method for this load balancer - default: standard - choices: ['standard', 'sticky'] - port: - description: - - Port to configure on the public-facing side of the load balancer pool - choices: [80, 443] - nodes: - description: - - A list of nodes that you want added to your load balancer pool - status: - description: - - The status of your loadbalancer - default: enabled - choices: ['enabled', 'disabled'] - state: - description: - - Whether to create or delete the load balancer pool - default: present - choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent'] -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples -- name: Create Loadbalancer - hosts: localhost - connection: local - tasks: - - name: Actually Create things - clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - { 'ipAddress': '10.11.22.123', 'privatePort': 80 } - state: present - -- name: Add node to an existing loadbalancer pool - hosts: localhost - connection: local - tasks: - - name: Actually Create things - clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - { 'ipAddress': '10.11.22.234', 'privatePort': 80 } - state: nodes_present - -- name: Remove node from an existing loadbalancer pool - hosts: localhost - connection: local - tasks: - - name: Actually Create things - clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - { 'ipAddress': '10.11.22.234', 'privatePort': 80 } - state: nodes_absent - -- name: Delete LoadbalancerPool - hosts: localhost - connection: local - tasks: - - name: Actually Delete things - clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - { 'ipAddress': '10.11.22.123', 'privatePort': 80 } - state: port_absent - -- name: Delete Loadbalancer - hosts: localhost - connection: local - tasks: - - name: Actually Delete things - clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - { 'ipAddress': '10.11.22.123', 'privatePort': 80 } - state: absent - -''' - -__version__ = '${version}' - -import requests -from time import sleep - -# -# Requires the clc-python-sdk. 
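# Editor's note: nodes are passed to this module as dicts of ipAddress/privatePort (see
# the examples above). A hypothetical helper showing how a desired node list could be
# compared with what a pool already has; the comparison key is an editorial assumption,
# not code from the module:
def missing_nodes(desired, existing):
    """Return desired nodes whose (ipAddress, privatePort) pair is not already present."""
    have = set((n['ipAddress'], n['privatePort']) for n in existing)
    return [n for n in desired if (n['ipAddress'], n['privatePort']) not in have]

assert missing_nodes([{'ipAddress': '10.11.22.234', 'privatePort': 80}], []) == \
       [{'ipAddress': '10.11.22.234', 'privatePort': 80}]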
-# sudo pip install clc-sdk -# -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -class ClcLoadBalancer(): - - clc = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.lb_dict = {} - - if not CLC_FOUND: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - - loadbalancer_name=self.module.params.get('name') - loadbalancer_alias=self.module.params.get('alias') - loadbalancer_location=self.module.params.get('location') - loadbalancer_description=self.module.params.get('description') - loadbalancer_port=self.module.params.get('port') - loadbalancer_method=self.module.params.get('method') - loadbalancer_persistence=self.module.params.get('persistence') - loadbalancer_nodes=self.module.params.get('nodes') - loadbalancer_status=self.module.params.get('status') - state=self.module.params.get('state') - - if loadbalancer_description == None: - loadbalancer_description = loadbalancer_name - - self._set_clc_credentials_from_env() - - self.lb_dict = self._get_loadbalancer_list(alias=loadbalancer_alias, location=loadbalancer_location) - - if state == 'present': - changed, result_lb, lb_id = self.ensure_loadbalancer_present(name=loadbalancer_name, - alias=loadbalancer_alias, - location=loadbalancer_location, - description=loadbalancer_description, - status=loadbalancer_status) - if loadbalancer_port: - changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(lb_id=lb_id, - alias=loadbalancer_alias, - location=loadbalancer_location, - method=loadbalancer_method, - persistence=loadbalancer_persistence, - port=loadbalancer_port) - - if loadbalancer_nodes: - changed, result_nodes = self.ensure_lbpool_nodes_set(alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes - ) - elif state == 'absent': - changed, result_lb = self.ensure_loadbalancer_absent(name=loadbalancer_name, - alias=loadbalancer_alias, - location=loadbalancer_location) - - elif state == 'port_absent': - changed, result_lb = self.ensure_loadbalancerpool_absent(alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port) - - elif state == 'nodes_present': - changed, result_lb = self.ensure_lbpool_nodes_present(alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - - elif state == 'nodes_absent': - changed, result_lb = self.ensure_lbpool_nodes_absent(alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - - self.module.exit_json(changed=changed, loadbalancer=result_lb) - # - # Functions to define the Ansible module and its arguments - # - def ensure_loadbalancer_present(self,name,alias,location,description,status): - """ - Check for loadbalancer presence (available) - :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :param description: Description of loadbalancer - :param status: Enabled / Disabled - :return: True / False - """ - changed = False - result = None - lb_id = self._loadbalancer_exists(name=name) - if lb_id: - result = name - changed = False - 
else: - if not self.module.check_mode: - result = self.create_loadbalancer(name=name, - alias=alias, - location=location, - description=description, - status=status) - lb_id = result.get('id') - changed = True - - return changed, result, lb_id - - def ensure_loadbalancerpool_present(self, lb_id, alias, location, method, persistence, port): - """ - Checks to see if a load balancer pool exists and creates one if it does not. - :param name: The loadbalancer name - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param method: the load balancing method - :param persistence: the load balancing persistence type - :param port: the port that the load balancer will listen on - :return: (changed, group, pool_id) - - changed: Boolean whether a change was made - result: The result from the CLC API call - pool_id: The string id of the pool - """ - changed = False - result = None - if not lb_id: - return False, None, None - pool_id = self._loadbalancerpool_exists(alias=alias, location=location, port=port, lb_id=lb_id) - if not pool_id: - changed = True - if not self.module.check_mode: - result = self.create_loadbalancerpool(alias=alias, location=location, lb_id=lb_id, method=method, persistence=persistence, port=port) - pool_id = result.get('id') - - else: - changed = False - result = port - - return changed, result, pool_id - - def ensure_loadbalancer_absent(self,name,alias,location): - """ - Check for loadbalancer presence (not available) - :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :return: (changed, result) - changed: Boolean whether a change was made - result: The result from the CLC API Call - """ - changed = False - result = None - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - if not self.module.check_mode: - result = self.delete_loadbalancer(alias=alias, - location=location, - name=name) - changed = True - else: - result = name - changed = False - return changed, result - - def ensure_loadbalancerpool_absent(self, alias, location, name, port): - """ - Checks to see if a load balancer pool exists and deletes it if it does - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param loadbalancer: the name of the load balancer - :param port: the port that the load balancer will listen on - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = None - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists(alias=alias, location=location, port=port, lb_id=lb_id) - if pool_id: - changed = True - if not self.module.check_mode: - result = self.delete_loadbalancerpool(alias=alias, location=location, lb_id=lb_id, pool_id=pool_id) - else: - changed = False - result = "Pool doesn't exist" - else: - result = "LB Doesn't Exist" - return changed, result - - def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes): - """ - Checks to see if the provided list of nodes exist for the pool and set the nodes if any in the list doesn't exist - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer will listen on - :param nodes: The list of nodes to be updated to the pool - :return: (changed, group) - - changed: 
Boolean whether a change was made - result: The result from the CLC API call - """ - result = {} - changed = False - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists(alias=alias, location=location, port=port, lb_id=lb_id) - if pool_id: - nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias, - location=location, - port=port, - lb_id=lb_id, - pool_id=pool_id, - nodes_to_check=nodes) - if not nodes_exist: - changed = True - result = self.set_loadbalancernodes(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes=nodes) - else: - result = "Pool doesn't exist" - else: - result = "Load balancer doesn't Exist" - return changed, result - - def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes): - """ - Checks to see if the provided list of nodes exist for the pool and add the missing nodes to the pool - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer will listen on - :param nodes: the list of nodes to be added - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists(alias=alias, location=location, port=port, lb_id=lb_id) - if pool_id: - changed, result = self.add_lbpool_nodes(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes_to_add=nodes) - else: - result = "Pool doesn't exist" - else: - result = "Load balancer doesn't Exist" - return changed, result - - def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes): - """ - Checks to see if the provided list of nodes exist for the pool and add the missing nodes to the pool - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer will listen on - :param nodes: the list of nodes to be removed - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists(alias=alias, location=location, port=port, lb_id=lb_id) - if pool_id: - changed, result = self.remove_lbpool_nodes(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes_to_remove=nodes) - else: - result = "Pool doesn't exist" - else: - result = "Load balancer doesn't Exist" - return changed, result - - def create_loadbalancer(self,name,alias,location,description,status): - """ - Create a loadbalancer w/ params - :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :param description: Description for loadbalancer to be created - :param status: Enabled / Disabled - :return: Success / Failure - """ - result = self.clc.v2.API.Call('POST', '/v2/sharedLoadBalancers/%s/%s' % (alias, location), json.dumps({"name":name,"description":description,"status":status})) - sleep(1) - return result - - def create_loadbalancerpool(self, alias, location, lb_id, method, persistence, port): - """ - Creates a pool on the provided load 
balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param method: the load balancing method - :param persistence: the load balancing persistence type - :param port: the port that the load balancer will listen on - :return: result: The result from the create API call - """ - result = self.clc.v2.API.Call('POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % (alias, location, lb_id), json.dumps({"port":port, "method":method, "persistence":persistence})) - return result - - def delete_loadbalancer(self,alias,location,name): - """ - Delete CLC loadbalancer - :param alias: Alias for account - :param location: Datacenter - :param name: Name of the loadbalancer to delete - :return: 204 if successful else failure - """ - lb_id = self._get_loadbalancer_id(name=name) - result = self.clc.v2.API.Call('DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' % (alias, location, lb_id)) - return result - - def delete_loadbalancerpool(self, alias, location, lb_id, pool_id): - """ - Delete a pool on the provided load balancer - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :return: result: The result from the delete API call - """ - result = self.clc.v2.API.Call('DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' % (alias, location, lb_id, pool_id)) - return result - - def _get_loadbalancer_id(self, name): - """ - Retrieve unique ID of loadbalancer - :param name: Name of loadbalancer - :return: Unique ID of loadbalancer - """ - for lb in self.lb_dict: - if lb.get('name') == name: - id = lb.get('id') - return id - - def _get_loadbalancer_list(self, alias, location): - """ - Retrieve a list of loadbalancers - :param alias: Alias for account - :param location: Datacenter - :return: JSON data for all loadbalancers at datacenter - """ - return self.clc.v2.API.Call('GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location)) - - def _loadbalancer_exists(self, name): - """ - Verify a loadbalancer exists - :param name: Name of loadbalancer - :return: False or the ID of the existing loadbalancer - """ - result = False - - for lb in self.lb_dict: - if lb.get('name') == name: - result = lb.get('id') - return result - - def _loadbalancerpool_exists(self, alias, location, port, lb_id): - """ - Checks to see if a pool exists on the specified port on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param port: the port to check and see if it exists - :param lb_id: the id string of the provided load balancer - :return: result: The id string of the pool or False - """ - result = False - pool_list = self.clc.v2.API.Call('GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % (alias, location, lb_id)) - for pool in pool_list: - if int(pool.get('port')) == int(port): - result = pool.get('id') - - return result - - def _loadbalancerpool_nodes_exists(self, alias, location, port, lb_id, pool_id, nodes_to_check): - """ - Checks to see if a set of nodes exists on the specified port on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param port: the port to check and see if it exists - :param lb_id: the id string of the provided load balancer - :param pool_id: the id string of the load balancer pool - :param nodes_to_check: the list of nodes to 
check for - :return: result: The id string of the pool or False - """ - result = False - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_check: - if not node.get('status'): - node['status'] = 'enabled' - if node in nodes: - result = True - else: - result = False - return result - - def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes): - """ - Updates nodes to the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes: a list of dictionaries containing the nodes to set - :return: result: The result from the API call - """ - result = None - if not lb_id: - return result - if not self.module.check_mode: - result = self.clc.v2.API.Call('PUT', - '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' - % (alias, location, lb_id, pool_id), json.dumps(nodes)) - return result - - def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add): - """ - Add nodes to the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes: a list of dictionaries containing the nodes to add - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = {} - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_add: - if not node.get('status'): - node['status'] = 'enabled' - if not node in nodes: - changed = True - nodes.append(node) - if changed == True and not self.module.check_mode: - result = self.set_loadbalancernodes(alias, location, lb_id, pool_id, nodes) - return changed, result - - def remove_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_remove): - """ - Removes nodes from the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes: a list of dictionaries containing the nodes to remove - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = {} - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_remove: - if not node.get('status'): - node['status'] = 'enabled' - if node in nodes: - changed = True - nodes.remove(node) - if changed == True and not self.module.check_mode: - result = self.set_loadbalancernodes(alias, location, lb_id, pool_id, nodes) - return changed, result - - def _get_lbpool_nodes(self, alias, location, lb_id, pool_id): - """ - Return the list of nodes available to the provided load balancer pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :return: result: The list of nodes - """ - result = self.clc.v2.API.Call('GET', - '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' - % (alias, location, lb_id, pool_id)) - return result - - @staticmethod - def define_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - 
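            # Editor's note (not original code): the node helpers above
            # (_loadbalancerpool_nodes_exists, add_lbpool_nodes and remove_lbpool_nodes)
            # default a node's 'status' to 'enabled' before comparing dictionaries, so
            # playbooks may omit 'status' and still match entries already in the pool.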
description=dict(default=None), - location=dict(required=True, default=None), - alias=dict(required=True, default=None), - port=dict(choices=[80, 443]), - method=dict(choices=['leastConnection', 'roundRobin']), - persistence=dict(choices=['standard', 'sticky']), - nodes=dict(type='list', default=[]), - status=dict(default='enabled', choices=['enabled', 'disabled']), - state=dict(default='present', choices=['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']), - wait=dict(type='bool', default=True) - ) - - return argument_spec - - # - # Module Behavior Functions - # - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(), - supports_check_mode=True) - clc_loadbalancer = ClcLoadBalancer(module) - clc_loadbalancer.process_request() - -from ansible.module_utils.basic import * # pylint: disable=W0614 -if __name__ == '__main__': - main() diff --git a/cloud/centurylink/clc_modify_server.py b/cloud/centurylink/clc_modify_server.py deleted file mode 100644 index 1a1e4d5b858..00000000000 --- a/cloud/centurylink/clc_modify_server.py +++ /dev/null @@ -1,710 +0,0 @@ -#!/usr/bin/python - -# CenturyLink Cloud Ansible Modules. -# -# These Ansible modules enable the CenturyLink Cloud v2 API to be called -# from an within Ansible Playbook. -# -# This file is part of CenturyLink Cloud, and is maintained -# by the Workflow as a Service Team -# -# Copyright 2015 CenturyLink Cloud -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# CenturyLink Cloud: http://www.CenturyLinkCloud.com -# API Documentation: https://www.centurylinkcloud.com/api-docs/v2/ -# - -DOCUMENTATION = ''' -module: clc_modify_server -short_desciption: modify servers in CenturyLink Cloud. 
-description: - - An Ansible module to modify servers in CenturyLink Cloud. -options: - server_ids: - description: - - A list of server Ids to modify. - default: [] - required: True - aliases: [] - cpu: - description: - - How many CPUs to update on the server - default: None - required: False - aliases: [] - memory: - description: - - Memory in GB. - default: None - required: False - aliases: [] - anti_affinity_policy_id: - description: - - The anti affinity policy id to be set for a heperscale server. - This is mutually exclusive with 'anti_affinity_policy_name' - default: None - required: False - aliases: [] - anti_affinity_policy_name: - description: - - The anti affinity policy name to be set for a heperscale server. - This is mutually exclusive with 'anti_affinity_policy_id' - default: None - required: False - aliases: [] - alert_policy_id: - description: - - The alert policy id to be associated. - This is mutually exclusive with 'alert_policy_name' - default: None - required: False - aliases: [] - alert_policy_name: - description: - - The alert policy name to be associated. - This is mutually exclusive with 'alert_policy_id' - default: None - required: False - aliases: [] - state: - description: - - The state to insure that the provided resources are in. - default: 'present' - required: False - choices: ['present', 'absent'] - aliases: [] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - default: True - required: False - choices: [ True, False] - aliases: [] -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: set the cpu count to 4 on a server - clc_server: - server_ids: ['UC1ACCTTEST01'] - cpu: 4 - state: present - -- name: set the memory to 8GB on a server - clc_server: - server_ids: ['UC1ACCTTEST01'] - memory: 8 - state: present - -- name: set the anti affinity policy on a server - clc_server: - server_ids: ['UC1ACCTTEST01'] - anti_affinity_policy_name: 'aa_policy' - state: present - -- name: set the alert policy on a server - clc_server: - server_ids: ['UC1ACCTTEST01'] - alert_policy_name: 'alert_policy' - state: present - -- name: set the memory to 16GB and cpu to 8 core on a lust if servers - clc_server: - server_ids: ['UC1ACCTTEST01','UC1ACCTTEST02'] - cpu: 8 - memory: 16 - state: present -''' - -__version__ = '${version}' - -import requests - -# -# Requires the clc-python-sdk. 
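# The module also expects CLC credentials in the environment (see
# _set_clc_credentials_from_env below). The helper sketched here is an editorial
# illustration only -- resolve_clc_credentials is not part of the original module --
# showing the precedence that function applies: an API token plus account alias is
# used first, then a username/password pair, while CLC_V2_API_URL may override the
# default endpoint.
import os


def resolve_clc_credentials(env=None):
    """Illustrative only: report which CLC credential style the environment provides."""
    env = os.environ if env is None else env
    if env.get('CLC_V2_API_TOKEN') and env.get('CLC_ACCT_ALIAS'):
        # token-based login, scoped to an explicit account alias
        return {'style': 'token', 'alias': env['CLC_ACCT_ALIAS']}
    if env.get('CLC_V2_API_USERNAME') and env.get('CLC_V2_API_PASSWD'):
        # username/password login, as passed to clc.v2.SetCredentials
        return {'style': 'password', 'username': env['CLC_V2_API_USERNAME']}
    return {'style': 'missing'}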
-# sudo pip install clc-sdk -# -try: - import clc as clc_sdk - from clc import CLCException - from clc import APIFailedResponse -except ImportError: - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - - -class ClcModifyServer(): - clc = clc_sdk - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - self._set_clc_credentials_from_env() - - p = self.module.params - - server_ids = p['server_ids'] - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of instances to modify: %s' % - server_ids) - - (changed, server_dict_array, new_server_ids) = ClcModifyServer._modify_servers( - module=self.module, clc=self.clc, server_ids=server_ids) - - self.module.exit_json( - changed=changed, - server_ids=new_server_ids, - servers=server_dict_array) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - server_ids=dict(type='list', required=True), - state=dict(default='present', choices=['present', 'absent']), - cpu=dict(), - memory=dict(), - anti_affinity_policy_id=dict(), - anti_affinity_policy_name=dict(), - alert_policy_id=dict(), - alert_policy_name=dict(), - wait=dict(type='bool', default=True) - ) - mutually_exclusive = [ - ['anti_affinity_policy_id', 'anti_affinity_policy_name'], - ['alert_policy_id', 'alert_policy_name'] - ] - return {"argument_spec": argument_spec, - "mutually_exclusive": mutually_exclusive} - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _wait_for_requests(clc, requests, servers, wait): - """ - Block until server provisioning requests are completed. - :param clc: the clc-sdk instance to use - :param requests: a list of clc-sdk.Request instances - :param servers: a list of servers to refresh - :param wait: a boolean on whether to block or not. 
This function is skipped if True - :return: none - """ - if wait: - # Requests.WaitUntilComplete() returns the count of failed requests - failed_requests_count = sum( - [request.WaitUntilComplete() for request in requests]) - - if failed_requests_count > 0: - raise clc - else: - ClcModifyServer._refresh_servers(servers) - - @staticmethod - def _refresh_servers(servers): - """ - Loop through a list of servers and refresh them - :param servers: list of clc-sdk.Server instances to refresh - :return: none - """ - for server in servers: - server.Refresh() - - @staticmethod - def _modify_servers(module, clc, server_ids): - """ - modify the servers configuration on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to modify - :return: a list of dictionaries with server information about the servers that were modified - """ - p = module.params - wait = p.get('wait') - state = p.get('state') - server_params = { - 'cpu': p.get('cpu'), - 'memory': p.get('memory'), - 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'), - 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'), - 'alert_policy_id': p.get('alert_policy_id'), - 'alert_policy_name': p.get('alert_policy_name'), - } - changed = False - server_changed = False - aa_changed = False - ap_changed = False - server_dict_array = [] - result_server_ids = [] - requests = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - if state == 'present': - for server in servers: - server_changed, server_result, changed_servers = ClcModifyServer._ensure_server_config( - clc, module, None, server, server_params) - if server_result: - requests.append(server_result) - aa_changed, changed_servers = ClcModifyServer._ensure_aa_policy( - clc, module, None, server, server_params) - ap_changed, changed_servers = ClcModifyServer._ensure_alert_policy_present( - clc, module, None, server, server_params) - elif state == 'absent': - for server in servers: - ap_changed, changed_servers = ClcModifyServer._ensure_alert_policy_absent( - clc, module, None, server, server_params) - if server_changed or aa_changed or ap_changed: - changed = True - - if wait: - for r in requests: - r.WaitUntilComplete() - for server in changed_servers: - server.Refresh() - - for server in changed_servers: - server_dict_array.append(server.data) - result_server_ids.append(server.id) - - return changed, server_dict_array, result_server_ids - - @staticmethod - def _ensure_server_config( - clc, module, alias, server, server_params): - """ - ensures the server is updated with the provided cpu and memory - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - cpu = server_params.get('cpu') - memory = server_params.get('memory') - changed = False - result = None - changed_servers = [] - - if not cpu: - cpu = server.cpu - if not memory: - memory = server.memory - if memory != server.memory or cpu != server.cpu: - changed_servers.append(server) - result = ClcModifyServer._modify_clc_server( - clc, - module, - None, - server.id, - cpu, - memory) - changed = True - return 
changed, result, changed_servers - - @staticmethod - def _modify_clc_server(clc, module, acct_alias, server_id, cpu, memory): - """ - Modify the memory or CPU on a clc server. This function is not yet implemented. - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the clc account alias to look up the server - :param server_id: id of the server to modify - :param cpu: the new cpu value - :param memory: the new memory value - :return: the result of CLC API call - """ - if not acct_alias: - acct_alias = clc.v2.Account.GetAlias() - if not server_id: - return module.fail_json( - msg='server_id must be provided to modify the server') - - result = None - - if not module.check_mode: - - # Update the server configuation - job_obj = clc.v2.API.Call('PATCH', - 'servers/%s/%s' % (acct_alias, - server_id), - json.dumps([{"op": "set", - "member": "memory", - "value": memory}, - {"op": "set", - "member": "cpu", - "value": cpu}])) - result = clc.v2.Requests(job_obj) - return result - - @staticmethod - def _ensure_aa_policy( - clc, module, acct_alias, server, server_params): - """ - ensures the server is updated with the provided anti affinity policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - changed_servers = [] - - if not acct_alias: - acct_alias = clc.v2.Account.GetAlias() - - aa_policy_id = server_params.get('anti_affinity_policy_id') - aa_policy_name = server_params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - aa_policy_id = ClcModifyServer._get_aa_policy_id_by_name( - clc, - module, - acct_alias, - aa_policy_name) - current_aa_policy_id = ClcModifyServer._get_aa_policy_id_of_server( - clc, - module, - acct_alias, - server.id) - - if aa_policy_id and aa_policy_id != current_aa_policy_id: - if server not in changed_servers: - changed_servers.append(server) - ClcModifyServer._modify_aa_policy( - clc, - module, - acct_alias, - server.id, - aa_policy_id) - changed = True - return changed, changed_servers - - @staticmethod - def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id): - """ - modifies the anti affinity policy of the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param aa_policy_id: the anti affinity policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - result = clc.v2.API.Call('PUT', - 'servers/%s/%s/antiAffinityPolicy' % ( - acct_alias, - server_id), - json.dumps({"id": aa_policy_id})) - return result - - @staticmethod - def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name): - """ - retrieves the anti affinity policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param aa_policy_name: the anti affinity policy name - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - aa_policies = clc.v2.API.Call(method='GET', - url='antiAffinityPolicies/%s' % (alias)) - for aa_policy in aa_policies.get('items'): - if 
aa_policy.get('name') == aa_policy_name: - if not aa_policy_id: - aa_policy_id = aa_policy.get('id') - else: - return module.fail_json( - msg='mutiple anti affinity policies were found with policy name : %s' % - (aa_policy_name)) - if not aa_policy_id: - return module.fail_json( - msg='No anti affinity policy was found with policy name : %s' % - (aa_policy_name)) - return aa_policy_id - - @staticmethod - def _get_aa_policy_id_of_server(clc, module, alias, server_id): - """ - retrieves the anti affinity policy id of the server based on the CLC server id - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param server_id: the CLC server id - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - result = clc.v2.API.Call( - method='GET', url='servers/%s/%s/antiAffinityPolicy' % - (alias, server_id)) - aa_policy_id = result.get('id') - except APIFailedResponse as e: - if e.response_status_code != 404: - raise e - return aa_policy_id - - @staticmethod - def _ensure_alert_policy_present( - clc, module, acct_alias, server, server_params): - """ - ensures the server is updated with the provided alert policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - changed_servers = [] - - if not acct_alias: - acct_alias = clc.v2.Account.GetAlias() - - alert_policy_id = server_params.get('alert_policy_id') - alert_policy_name = server_params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alert_policy_id = ClcModifyServer._get_alert_policy_id_by_name( - clc, - module, - acct_alias, - alert_policy_name) - if alert_policy_id and not ClcModifyServer._alert_policy_exists(server, alert_policy_id): - if server not in changed_servers: - changed_servers.append(server) - ClcModifyServer._add_alert_policy_to_server( - clc, - module, - acct_alias, - server.id, - alert_policy_id) - changed = True - return changed, changed_servers - - @staticmethod - def _ensure_alert_policy_absent( - clc, module, acct_alias, server, server_params): - """ - ensures the alert policy is removed from the server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = None - changed_servers = [] - - if not acct_alias: - acct_alias = clc.v2.Account.GetAlias() - - alert_policy_id = server_params.get('alert_policy_id') - alert_policy_name = server_params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alert_policy_id = ClcModifyServer._get_alert_policy_id_by_name( - clc, - module, - acct_alias, - alert_policy_name) - - if alert_policy_id and ClcModifyServer._alert_policy_exists(server, alert_policy_id): - if server not in changed_servers: - changed_servers.append(server) - ClcModifyServer._remove_alert_policy_to_server( - clc, - module, - acct_alias, - server.id, - alert_policy_id) - changed = True - return changed, changed_servers - - @staticmethod - def 
_add_alert_policy_to_server(clc, module, acct_alias, server_id, alert_policy_id): - """ - add the alert policy to CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param alert_policy_id: the alert policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('POST', - 'servers/%s/%s/alertPolicies' % ( - acct_alias, - server_id), - json.dumps({"id": alert_policy_id})) - except clc.APIFailedResponse as e: - return module.fail_json( - msg='Unable to set alert policy to the server : %s. %s' % (server_id, str(e.response_text))) - return result - - @staticmethod - def _remove_alert_policy_to_server(clc, module, acct_alias, server_id, alert_policy_id): - """ - remove the alert policy to the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param alert_policy_id: the alert policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('DELETE', - 'servers/%s/%s/alertPolicies/%s' - % (acct_alias, server_id, alert_policy_id)) - except clc.APIFailedResponse as e: - return module.fail_json( - msg='Unable to remove alert policy to the server : %s. %s' % (server_id, str(e.response_text))) - return result - - @staticmethod - def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): - """ - retrieves the alert policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param alert_policy_name: the alert policy name - :return: alert_policy_id: The alert policy id - """ - alert_policy_id = None - alert_policies = clc.v2.API.Call(method='GET', - url='alertPolicies/%s' % (alias)) - for alert_policy in alert_policies.get('items'): - if alert_policy.get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = alert_policy.get('id') - else: - return module.fail_json( - msg='mutiple alert policies were found with policy name : %s' % - (alert_policy_name)) - return alert_policy_id - - @staticmethod - def _alert_policy_exists(server, alert_policy_id): - """ - Checks if the alert policy exists for the server - :param server: the clc server object - :param alert_policy_id: the alert policy - :return: True: if the given alert policy id associated to the server, False otherwise - """ - result = False - alert_policies = server.alertPolicies - if alert_policies: - for alert_policy in alert_policies: - if alert_policy.get('id') == alert_policy_id: - result = True - return result - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - - argument_dict = ClcModifyServer._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_modify_server = ClcModifyServer(module) - clc_modify_server.process_request() - -from ansible.module_utils.basic import * # pylint: disable=W0614 -if __name__ == '__main__': - main() diff --git a/cloud/centurylink/clc_server.py b/cloud/centurylink/clc_server.py deleted file mode 100644 index e102cd21f47..00000000000 --- a/cloud/centurylink/clc_server.py +++ /dev/null @@ -1,1323 +0,0 @@ -#!/usr/bin/python - -# CenturyLink Cloud Ansible Modules. -# -# These Ansible modules enable the CenturyLink Cloud v2 API to be called -# from an within Ansible Playbook. -# -# This file is part of CenturyLink Cloud, and is maintained -# by the Workflow as a Service Team -# -# Copyright 2015 CenturyLink Cloud -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# CenturyLink Cloud: http://www.CenturyLinkCloud.com -# API Documentation: https://www.centurylinkcloud.com/api-docs/v2/ -# - -DOCUMENTATION = ''' -module: clc_server -short_desciption: Create, Delete, Start and Stop servers in CenturyLink Cloud. -description: - - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud. -options: - additional_disks: - description: - - Specify additional disks for the server - required: False - default: None - aliases: [] - add_public_ip: - description: - - Whether to add a public ip to the server - required: False - default: False - choices: [False, True] - aliases: [] - alias: - description: - - The account alias to provision the servers under. - default: - - The default alias for the API credentials - required: False - default: None - aliases: [] - anti_affinity_policy_id: - description: - - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'. - required: False - default: None - aliases: [] - anti_affinity_policy_name: - description: - - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'. - required: False - default: None - aliases: [] - alert_policy_id: - description: - - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'. - required: False - default: None - aliases: [] - alert_policy_name: - description: - - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'. - required: False - default: None - aliases: [] - - count: - description: - - The number of servers to build (mutually exclusive with exact_count) - default: None - aliases: [] - count_group: - description: - - Required when exact_count is specified. The Server Group use to determine how many severs to deploy. - default: 1 - required: False - aliases: [] - cpu: - description: - - How many CPUs to provision on the server - default: None - required: False - aliases: [] - cpu_autoscale_policy_id: - description: - - The autoscale policy to assign to the server. 
- default: None - required: False - aliases: [] - custom_fields: - description: - - A dictionary of custom fields to set on the server. - default: [] - required: False - aliases: [] - description: - description: - - The description to set for the server. - default: None - required: False - aliases: [] - exact_count: - description: - - Run in idempotent mode. Will insure that this exact number of servers are running in the provided group, creating and deleting them to reach that count. Requires count_group to be set. - default: None - required: False - aliases: [] - group: - description: - - The Server Group to create servers under. - default: 'Default Group' - required: False - aliases: [] - ip_address: - description: - - The IP Address for the server. One is assigned if not provided. - default: None - required: False - aliases: [] - location: - description: - - The Datacenter to create servers in. - default: None - required: False - aliases: [] - managed_os: - description: - - Whether to create the server as 'Managed' or not. - default: False - required: False - choices: [True, False] - aliases: [] - memory: - description: - - Memory in GB. - default: 1 - required: False - aliases: [] - name: - description: - - A 1 to 6 character identifier to use for the server. - default: None - required: False - aliases: [] - network_id: - description: - - The network UUID on which to create servers. - default: None - required: False - aliases: [] - packages: - description: - - Blueprints to run on the server after its created. - default: [] - required: False - aliases: [] - password: - description: - - Password for the administrator user - default: None - required: False - aliases: [] - primary_dns: - description: - - Primary DNS used by the server. - default: None - required: False - aliases: [] - public_ip_protocol: - description: - - The protocol to use for the public ip if add_public_ip is set to True. - default: 'TCP' - required: False - aliases: [] - public_ip_ports: - description: - - A list of ports to allow on the firewall to thes servers public ip, if add_public_ip is set to True. - default: [] - required: False - aliases: [] - secondary_dns: - description: - - Secondary DNS used by the server. - default: None - required: False - aliases: [] - server_ids: - description: - - Required for started, stopped, and absent states. A list of server Ids to insure are started, stopped, or absent. - default: [] - required: False - aliases: [] - source_server_password: - description: - - The password for the source server if a clone is specified. - default: None - required: False - aliases: [] - state: - description: - - The state to insure that the provided resources are in. - default: 'present' - required: False - choices: ['present', 'absent', 'started', 'stopped'] - aliases: [] - storage_type: - description: - - The type of storage to attach to the server. - default: 'standard' - required: False - choices: ['standard', 'hyperscale'] - aliases: [] - template: - description: - - The template to use for server creation. Will search for a template if a partial string is provided. - default: None - required: false - aliases: [] - ttl: - description: - - The time to live for the server in seconds. The server will be deleted when this time expires. - default: None - required: False - aliases: [] - type: - description: - - The type of server to create. 
- default: 'standard' - required: False - choices: ['standard', 'hyperscale'] - aliases: [] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - default: True - required: False - choices: [ True, False] - aliases: [] -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Provision a single Ubuntu Server - clc_server: - name: test - template: ubuntu-14-64 - count: 1 - group: 'Default Group' - state: present - -- name: Ensure 'Default Group' has exactly 5 servers - clc_server: - name: test - template: ubuntu-14-64 - exact_count: 5 - count_group: 'Default Group' - group: 'Default Group' - -- name: Stop a Server - clc_server: - server_ids: ['UC1ACCTTEST01'] - state: stopped - -- name: Start a Server - clc_server: - server_ids: ['UC1ACCTTEST01'] - state: started - -- name: Delete a Server - clc_server: - server_ids: ['UC1ACCTTEST01'] - state: absent -''' - -__version__ = '${version}' - -import requests -from time import sleep - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -try: - import clc as clc_sdk - from clc import CLCException - from clc import APIFailedResponse -except ImportError: - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - - -class ClcServer(): - clc = clc_sdk - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - self._set_clc_credentials_from_env() - - self.module.params = ClcServer._validate_module_params(self.clc, - self.module) - p = self.module.params - state = p.get('state') - - # - # Handle each state - # - - if state == 'absent': - server_ids = p['server_ids'] - if not isinstance(server_ids, list): - self.module.fail_json( - msg='server_ids needs to be a list of instances to delete: %s' % - server_ids) - - (changed, - server_dict_array, - new_server_ids) = ClcServer._delete_servers(module=self.module, - clc=self.clc, - server_ids=server_ids) - - elif state in ('started', 'stopped'): - server_ids = p.get('server_ids') - if not isinstance(server_ids, list): - self.module.fail_json( - msg='server_ids needs to be a list of servers to run: %s' % - server_ids) - - (changed, - server_dict_array, - new_server_ids) = ClcServer._startstop_servers(self.module, - self.clc, - server_ids) - - elif state == 'present': - # Changed is always set to true when provisioning new instances - if not p.get('template'): - self.module.fail_json( - msg='template parameter is required for new instance') - - if p.get('exact_count') is None: - (server_dict_array, - new_server_ids, - changed) = ClcServer._create_servers(self.module, - self.clc) - else: - (server_dict_array, - new_server_ids, - changed) = ClcServer._enforce_count(self.module, - self.clc) - - self.module.exit_json( - changed=changed, - server_ids=new_server_ids, - servers=server_dict_array) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict(name=dict(), - template=dict(), - group=dict(default='Default Group'), - network_id=dict(), - location=dict(default=None), - cpu=dict(default=1), - 
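                             # Editor's note (not original code): although cpu and
                             # memory carry simple defaults here, _find_cpu and
                             # _find_memory later in this module can also resolve them
                             # from the target group's defaults, and 'count' is declared
                             # mutually exclusive with 'exact_count' in the
                             # mutually_exclusive list defined after this spec.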
memory=dict(default='1'), - alias=dict(default=None), - password=dict(default=None), - ip_address=dict(default=None), - storage_type=dict(default='standard'), - type=dict( - default='standard', - choices=[ - 'standard', - 'hyperscale']), - primary_dns=dict(default=None), - secondary_dns=dict(default=None), - additional_disks=dict(type='list', default=[]), - custom_fields=dict(type='list', default=[]), - ttl=dict(default=None), - managed_os=dict(type='bool', default=False), - description=dict(default=None), - source_server_password=dict(default=None), - cpu_autoscale_policy_id=dict(default=None), - anti_affinity_policy_id=dict(default=None), - anti_affinity_policy_name=dict(default=None), - alert_policy_id=dict(default=None), - alert_policy_name=dict(default=None), - packages=dict(type='list', default=[]), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'started', - 'stopped']), - count=dict(type='int', default='1'), - exact_count=dict(type='int', default=None), - count_group=dict(), - server_ids=dict(type='list'), - add_public_ip=dict(type='bool', default=False), - public_ip_protocol=dict(default='TCP'), - public_ip_ports=dict(type='list'), - wait=dict(type='bool', default=True)) - - mutually_exclusive = [ - ['exact_count', 'count'], - ['exact_count', 'state'], - ['anti_affinity_policy_id', 'anti_affinity_policy_name'], - ['alert_policy_id', 'alert_policy_name'], - ] - return {"argument_spec": argument_spec, - "mutually_exclusive": mutually_exclusive} - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _validate_module_params(clc, module): - """ - Validate the module params, and lookup default values. - :param clc: clc-sdk instance to use - :param module: module to validate - :return: dictionary of validated params - """ - params = module.params - datacenter = ClcServer._find_datacenter(clc, module) - - ClcServer._validate_types(module) - ClcServer._validate_name(module) - - params['alias'] = ClcServer._find_alias(clc, module) - params['cpu'] = ClcServer._find_cpu(clc, module) - params['memory'] = ClcServer._find_memory(clc, module) - params['description'] = ClcServer._find_description(module) - params['ttl'] = ClcServer._find_ttl(clc, module) - params['template'] = ClcServer._find_template_id(module, datacenter) - params['group'] = ClcServer._find_group(module, datacenter).id - params['network_id'] = ClcServer._find_network_id(module, datacenter) - - return params - - @staticmethod - def _find_datacenter(clc, module): - """ - Find the datacenter by calling the CLC API. 
- :param clc: clc-sdk instance to use - :param module: module to validate - :return: clc-sdk.Datacenter instance - """ - location = module.params.get('location') - try: - datacenter = clc.v2.Datacenter(location) - return datacenter - except CLCException: - module.fail_json(msg=str("Unable to find location: " + location)) - - @staticmethod - def _find_alias(clc, module): - """ - Find or Validate the Account Alias by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: clc-sdk.Account instance - """ - alias = module.params.get('alias') - if not alias: - alias = clc.v2.Account.GetAlias() - return alias - - @staticmethod - def _find_cpu(clc, module): - """ - Find or validate the CPU value by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: Int value for CPU - """ - cpu = module.params.get('cpu') - group_id = module.params.get('group_id') - alias = module.params.get('alias') - state = module.params.get('state') - - if not cpu and state == 'present': - group = clc.v2.Group(id=group_id, - alias=alias) - if group.Defaults("cpu"): - cpu = group.Defaults("cpu") - else: - module.fail_json( - msg=str("Cannot determine a default cpu value. Please provide a value for cpu.")) - return cpu - - @staticmethod - def _find_memory(clc, module): - """ - Find or validate the Memory value by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: Int value for Memory - """ - memory = module.params.get('memory') - group_id = module.params.get('group_id') - alias = module.params.get('alias') - state = module.params.get('state') - - if not memory and state == 'present': - group = clc.v2.Group(id=group_id, - alias=alias) - if group.Defaults("memory"): - memory = group.Defaults("memory") - else: - module.fail_json(msg=str( - "Cannot determine a default memory value. 
Please provide a value for memory.")) - return memory - - @staticmethod - def _find_description(module): - """ - Set the description module param to name if description is blank - :param module: the module to validate - :return: string description - """ - description = module.params.get('description') - if not description: - description = module.params.get('name') - return description - - @staticmethod - def _validate_types(module): - """ - Validate that type and storage_type are set appropriately, and fail if not - :param module: the module to validate - :return: none - """ - state = module.params.get('state') - type = module.params.get( - 'type').lower() if module.params.get('type') else None - storage_type = module.params.get( - 'storage_type').lower() if module.params.get('storage_type') else None - - if state == "present": - if type == "standard" and storage_type not in ( - "standard", "premium"): - module.fail_json( - msg=str("Standard VMs must have storage_type = 'standard' or 'premium'")) - - if type == "hyperscale" and storage_type != "hyperscale": - module.fail_json( - msg=str("Hyperscale VMs must have storage_type = 'hyperscale'")) - - @staticmethod - def _find_ttl(clc, module): - """ - Validate that TTL is > 3600 if set, and fail if not - :param clc: clc-sdk instance to use - :param module: module to validate - :return: validated ttl - """ - ttl = module.params.get('ttl') - - if ttl: - if ttl <= 3600: - module.fail_json(msg=str("Ttl cannot be <= 3600")) - else: - ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl) - return ttl - - @staticmethod - def _find_template_id(module, datacenter): - """ - Find the template id by calling the CLC API. - :param module: the module to validate - :param datacenter: the datacenter to search for the template - :return: a valid clc template id - """ - lookup_template = module.params.get('template') - state = module.params.get('state') - result = None - - if state == 'present': - try: - result = datacenter.Templates().Search(lookup_template)[0].id - except CLCException: - module.fail_json( - msg=str( - "Unable to find a template: " + - lookup_template + - " in location: " + - datacenter.id)) - return result - - @staticmethod - def _find_network_id(module, datacenter): - """ - Validate the provided network id or return a default. 
- :param module: the module to validate - :param datacenter: the datacenter to search for a network id - :return: a valid network id - """ - network_id = module.params.get('network_id') - - if not network_id: - try: - network_id = datacenter.Networks().networks[0].id - except CLCException: - module.fail_json( - msg=str( - "Unable to find a network in location: " + - datacenter.id)) - - return network_id - - @staticmethod - def _create_servers(module, clc, override_count=None): - """ - Create New Servers - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :return: a list of dictionaries with server information about the servers that were created - """ - p = module.params - requests = [] - servers = [] - server_dict_array = [] - created_server_ids = [] - - add_public_ip = p.get('add_public_ip') - public_ip_protocol = p.get('public_ip_protocol') - public_ip_ports = p.get('public_ip_ports') - wait = p.get('wait') - - params = { - 'name': p.get('name'), - 'template': p.get('template'), - 'group_id': p.get('group'), - 'network_id': p.get('network_id'), - 'cpu': p.get('cpu'), - 'memory': p.get('memory'), - 'alias': p.get('alias'), - 'password': p.get('password'), - 'ip_address': p.get('ip_address'), - 'storage_type': p.get('storage_type'), - 'type': p.get('type'), - 'primary_dns': p.get('primary_dns'), - 'secondary_dns': p.get('secondary_dns'), - 'additional_disks': p.get('additional_disks'), - 'custom_fields': p.get('custom_fields'), - 'ttl': p.get('ttl'), - 'managed_os': p.get('managed_os'), - 'description': p.get('description'), - 'source_server_password': p.get('source_server_password'), - 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'), - 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'), - 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'), - 'packages': p.get('packages') - } - - count = override_count if override_count else p.get('count') - - changed = False if count == 0 else True - - if changed: - for i in range(0, count): - if not module.check_mode: - req = ClcServer._create_clc_server(clc=clc, - module=module, - server_params=params) - server = req.requests[0].Server() - requests.append(req) - servers.append(server) - - ClcServer._wait_for_requests(clc, requests, servers, wait) - - ClcServer._add_public_ip_to_servers( - should_add_public_ip=add_public_ip, - servers=servers, - public_ip_protocol=public_ip_protocol, - public_ip_ports=public_ip_ports, - wait=wait) - ClcServer._add_alert_policy_to_servers(clc=clc, - module=module, - servers=servers) - - for server in servers: - # reload server details - server = clc.v2.Server(server.id) - - server.data['ipaddress'] = server.details[ - 'ipAddresses'][0]['internal'] - - if add_public_ip and len(server.PublicIPs().public_ips) > 0: - server.data['publicip'] = str( - server.PublicIPs().public_ips[0]) - - server_dict_array.append(server.data) - created_server_ids.append(server.id) - - return server_dict_array, created_server_ids, changed - - @staticmethod - def _validate_name(module): - """ - Validate that name is the correct length if provided, fail if it's not - :param module: the module to validate - :return: none - """ - name = module.params.get('name') - state = module.params.get('state') - - if state == 'present' and (len(name) < 1 or len(name) > 6): - module.fail_json(msg=str( - "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6")) - -# -# Functions to execute the module's behaviors -# (called from main()) -# - - 
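    # Editor's note: the static method below is a simplified, hypothetical sketch
    # (it is not part of the original module) of the reconciliation decision that
    # _enforce_count makes next: compare the servers running in count_group against
    # exact_count, then either create the shortfall or delete the lowest-sorted
    # surplus ids.
    @staticmethod
    def _plan_exact_count(running_server_ids, exact_count):
        """
        Illustrative only: return (number_to_create, ids_to_delete) for exact_count.
        :param running_server_ids: ids of the servers currently running in the group
        :param exact_count: the desired number of running servers
        :return: (int count of servers to create, list of server ids to delete)
        """
        current = len(running_server_ids)
        if current < exact_count:
            # shortfall -- create the difference, nothing to delete
            return exact_count - current, []
        if current > exact_count:
            # surplus -- delete the first ids in sorted order, as _enforce_count does
            to_remove = current - exact_count
            return 0, sorted(running_server_ids)[0:to_remove]
        # already at the desired count
        return 0, []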
@staticmethod - def _enforce_count(module, clc): - """ - Enforce that there is the right number of servers in the provided group. - Starts or stops servers as necessary. - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :return: a list of dictionaries with server information about the servers that were created or deleted - """ - p = module.params - changed_server_ids = None - changed = False - count_group = p.get('count_group') - datacenter = ClcServer._find_datacenter(clc, module) - exact_count = p.get('exact_count') - server_dict_array = [] - - # fail here if the exact count was specified without filtering - # on a group, as this may lead to a undesired removal of instances - if exact_count and count_group is None: - module.fail_json( - msg="you must use the 'count_group' option with exact_count") - - servers, running_servers = ClcServer._find_running_servers_by_group( - module, datacenter, count_group) - - if len(running_servers) == exact_count: - changed = False - - elif len(running_servers) < exact_count: - changed = True - to_create = exact_count - len(running_servers) - server_dict_array, changed_server_ids, changed \ - = ClcServer._create_servers(module, clc, override_count=to_create) - - for server in server_dict_array: - running_servers.append(server) - - elif len(running_servers) > exact_count: - changed = True - to_remove = len(running_servers) - exact_count - all_server_ids = sorted([x.id for x in running_servers]) - remove_ids = all_server_ids[0:to_remove] - - (changed, server_dict_array, changed_server_ids) \ - = ClcServer._delete_servers(module, clc, remove_ids) - - return server_dict_array, changed_server_ids, changed - - @staticmethod - def _wait_for_requests(clc, requests, servers, wait): - """ - Block until server provisioning requests are completed. - :param clc: the clc-sdk instance to use - :param requests: a list of clc-sdk.Request instances - :param servers: a list of servers to refresh - :param wait: a boolean on whether to block or not. This function is skipped if True - :return: none - """ - if wait: - # Requests.WaitUntilComplete() returns the count of failed requests - failed_requests_count = sum( - [request.WaitUntilComplete() for request in requests]) - - if failed_requests_count > 0: - raise clc - else: - ClcServer._refresh_servers(servers) - - @staticmethod - def _refresh_servers(servers): - """ - Loop through a list of servers and refresh them - :param servers: list of clc-sdk.Server instances to refresh - :return: none - """ - for server in servers: - server.Refresh() - - @staticmethod - def _add_public_ip_to_servers( - should_add_public_ip, - servers, - public_ip_protocol, - public_ip_ports, - wait): - """ - Create a public IP for servers - :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. 
Skipped if False - :param servers: List of servers to add public ips to - :param public_ip_protocol: a protocol to allow for the public ips - :param public_ip_ports: list of ports to allow for the public ips - :param wait: boolean - whether to block until the provisioning requests complete - :return: none - """ - - if should_add_public_ip: - ports_lst = [] - requests = [] - - for port in public_ip_ports: - ports_lst.append( - {'protocol': public_ip_protocol, 'port': port}) - - for server in servers: - requests.append(server.PublicIPs().Add(ports_lst)) - - if wait: - for r in requests: - r.WaitUntilComplete() - - @staticmethod - def _add_alert_policy_to_servers(clc, module, servers): - """ - Associate an alert policy to servers - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param servers: List of servers to add alert policy to - :return: none - """ - p = module.params - alert_policy_id = p.get('alert_policy_id') - alert_policy_name = p.get('alert_policy_name') - alias = p.get('alias') - if not alert_policy_id and alert_policy_name: - alert_policy_id = ClcServer._get_alert_policy_id_by_name( - clc=clc, - module=module, - alias=alias, - alert_policy_name=alert_policy_name - ) - if not alert_policy_id: - module.fail_json( - msg='No alert policy exist with name : %s' - % (alert_policy_name)) - for server in servers: - ClcServer._add_alert_policy_to_server( - clc=clc, - module=module, - alias=alias, - server_id=server.id, - alert_policy_id=alert_policy_id) - - @staticmethod - def _add_alert_policy_to_server(clc, module, alias, server_id, alert_policy_id): - """ - Associate an alert policy to a clc server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the clc account alias - :param serverid: The clc server id - :param alert_policy_id: the alert policy id to be associated to the server - :return: none - """ - try: - clc.v2.API.Call( - method='POST', - url='servers/%s/%s/alertPolicies' % (alias, server_id), - payload=json.dumps( - { - 'id': alert_policy_id - })) - except clc.APIFailedResponse as e: - return module.fail_json( - msg='Failed to associate alert policy to the server : %s with Error %s' - % (server_id, str(e.response_text))) - - @staticmethod - def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): - """ - Returns the alert policy id for the given alert policy name - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the clc account alias - :param alert_policy_name: the name of the alert policy - :return: the alert policy id - """ - alert_policy_id = None - policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % (alias)) - if not policies: - return alert_policy_id - for policy in policies.get('items'): - if policy.get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = policy.get('id') - else: - return module.fail_json( - msg='mutiple alert policies were found with policy name : %s' % - (alert_policy_name)) - return alert_policy_id - - - @staticmethod - def _delete_servers(module, clc, server_ids): - """ - Delete the servers on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to delete - :return: a list of dictionaries with server information about the servers that were deleted - """ - # Whether to wait for termination to complete before returning - p = module.params - wait = p.get('wait') - terminated_server_ids = [] 
- server_dict_array = [] - requests = [] - - changed = False - if not isinstance(server_ids, list) or len(server_ids) < 1: - module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - changed = True - - for server in servers: - if not module.check_mode: - requests.append(server.Delete()) - - if wait: - for r in requests: - r.WaitUntilComplete() - - for server in servers: - terminated_server_ids.append(server.id) - - return changed, server_dict_array, terminated_server_ids - - @staticmethod - def _startstop_servers(module, clc, server_ids): - """ - Start or Stop the servers on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to start or stop - :return: a list of dictionaries with server information about the servers that were started or stopped - """ - p = module.params - wait = p.get('wait') - state = p.get('state') - changed = False - changed_servers = [] - server_dict_array = [] - result_server_ids = [] - requests = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - for server in servers: - if server.powerState != state: - changed_servers.append(server) - if not module.check_mode: - requests.append( - ClcServer._change_server_power_state( - module, - server, - state)) - changed = True - - if wait: - for r in requests: - r.WaitUntilComplete() - for server in changed_servers: - server.Refresh() - - for server in changed_servers: - server_dict_array.append(server.data) - result_server_ids.append(server.id) - - return changed, server_dict_array, result_server_ids - - @staticmethod - def _change_server_power_state(module, server, state): - """ - Change the server powerState - :param module: the module to check for intended state - :param server: the server to start or stop - :param state: the intended powerState for the server - :return: the request object from clc-sdk call - """ - result = None - try: - if state == 'started': - result = server.PowerOn() - else: - result = server.PowerOff() - except: - module.fail_json( - msg='Unable to change state for server {0}'.format( - server.id)) - return result - return result - - @staticmethod - def _find_running_servers_by_group(module, datacenter, count_group): - """ - Find a list of running servers in the provided group - :param module: the AnsibleModule object - :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group - :param count_group: the group to count the servers - :return: list of servers, and list of running servers - """ - group = ClcServer._find_group( - module=module, - datacenter=datacenter, - lookup_group=count_group) - - servers = group.Servers().Servers() - running_servers = [] - - for server in servers: - if server.status == 'active' and server.powerState == 'started': - running_servers.append(server) - - return servers, running_servers - - @staticmethod - def _find_group(module, datacenter, lookup_group=None): - """ - Find a server group in a datacenter by calling the CLC API - :param module: the AnsibleModule instance - :param datacenter: clc-sdk.Datacenter instance to search for the group - :param lookup_group: string name of the group to search for - :return: clc-sdk.Group instance - """ - result = None - if not lookup_group: - lookup_group = module.params.get('group') - try: - return 
datacenter.Groups().Get(lookup_group) - except: - pass - - # The search above only acts on the main - result = ClcServer._find_group_recursive( - module, - datacenter.Groups(), - lookup_group) - - if result is None: - module.fail_json( - msg=str( - "Unable to find group: " + - lookup_group + - " in location: " + - datacenter.id)) - - return result - - @staticmethod - def _find_group_recursive(module, group_list, lookup_group): - """ - Find a server group by recursively walking the tree - :param module: the AnsibleModule instance to use - :param group_list: a list of groups to search - :param lookup_group: the group to look for - :return: list of groups - """ - result = None - for group in group_list.groups: - subgroups = group.Subgroups() - try: - return subgroups.Get(lookup_group) - except: - result = ClcServer._find_group_recursive( - module, - subgroups, - lookup_group) - - if result is not None: - break - - return result - - @staticmethod - def _create_clc_server( - clc, - module, - server_params): - """ - Call the CLC Rest API to Create a Server - :param clc: the clc-python-sdk instance to use - :param server_params: a dictionary of params to use to create the servers - :return: clc-sdk.Request object linked to the queued server request - """ - - aa_policy_id = server_params.get('anti_affinity_policy_id') - aa_policy_name = server_params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - aa_policy_id = ClcServer._get_anti_affinity_policy_id( - clc, - module, - server_params.get('alias'), - aa_policy_name) - - res = clc.v2.API.Call( - method='POST', - url='servers/%s' % - (server_params.get('alias')), - payload=json.dumps( - { - 'name': server_params.get('name'), - 'description': server_params.get('description'), - 'groupId': server_params.get('group_id'), - 'sourceServerId': server_params.get('template'), - 'isManagedOS': server_params.get('managed_os'), - 'primaryDNS': server_params.get('primary_dns'), - 'secondaryDNS': server_params.get('secondary_dns'), - 'networkId': server_params.get('network_id'), - 'ipAddress': server_params.get('ip_address'), - 'password': server_params.get('password'), - 'sourceServerPassword': server_params.get('source_server_password'), - 'cpu': server_params.get('cpu'), - 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'), - 'memoryGB': server_params.get('memory'), - 'type': server_params.get('type'), - 'storageType': server_params.get('storage_type'), - 'antiAffinityPolicyId': aa_policy_id, - 'customFields': server_params.get('custom_fields'), - 'additionalDisks': server_params.get('additional_disks'), - 'ttl': server_params.get('ttl'), - 'packages': server_params.get('packages')})) - - result = clc.v2.Requests(res) - - # - # Patch the Request object so that it returns a valid server - - # Find the server's UUID from the API response - server_uuid = [obj['id'] - for obj in res['links'] if obj['rel'] == 'self'][0] - - # Change the request server method to a _find_server_by_uuid closure so - # that it will work - result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry( - clc, - module, - server_uuid, - server_params.get('alias')) - - return result - - @staticmethod - def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name): - """ - retrieves the anti affinity policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param aa_policy_name: the anti affinity policy 
name - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - aa_policies = clc.v2.API.Call(method='GET', - url='antiAffinityPolicies/%s' % (alias)) - for aa_policy in aa_policies.get('items'): - if aa_policy.get('name') == aa_policy_name: - if not aa_policy_id: - aa_policy_id = aa_policy.get('id') - else: - return module.fail_json( - msg='mutiple anti affinity policies were found with policy name : %s' % - (aa_policy_name)) - if not aa_policy_id: - return module.fail_json( - msg='No anti affinity policy was found with policy name : %s' % - (aa_policy_name)) - return aa_policy_id - - # - # This is the function that gets patched to the Request.server object using a lamda closure - # - - @staticmethod - def _find_server_by_uuid_w_retry( - clc, module, svr_uuid, alias=None, retries=5, backout=2): - """ - Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned. - :param clc: the clc-sdk instance to use - :param svr_uuid: UUID of the server - :param alias: the Account Alias to search - :return: a clc-sdk.Server instance - """ - if not alias: - alias = clc.v2.Account.GetAlias() - - # Wait and retry if the api returns a 404 - while True: - retries -= 1 - try: - server_obj = clc.v2.API.Call( - method='GET', url='servers/%s/%s?uuid=true' % - (alias, svr_uuid)) - server_id = server_obj['id'] - server = clc.v2.Server( - id=server_id, - alias=alias, - server_obj=server_obj) - return server - - except APIFailedResponse as e: - if e.response_status_code != 404: - module.fail_json( - msg='A failure response was received from CLC API when ' - 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' % - (svr_uuid, e.response_status_code, e.message)) - return - if retries == 0: - module.fail_json( - msg='Unable to reach the CLC API after 5 attempts') - return - - sleep(backout) - backout = backout * 2 - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - argument_dict = ClcServer._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_server = ClcServer(module) - clc_server.process_request() - -from ansible.module_utils.basic import * # pylint: disable=W0614 -if __name__ == '__main__': - main() diff --git a/cloud/centurylink/clc_server_snapshot.py b/cloud/centurylink/clc_server_snapshot.py deleted file mode 100644 index 9ca1474f248..00000000000 --- a/cloud/centurylink/clc_server_snapshot.py +++ /dev/null @@ -1,341 +0,0 @@ -#!/usr/bin/python - -# CenturyLink Cloud Ansible Modules. -# -# These Ansible modules enable the CenturyLink Cloud v2 API to be called -# from an within Ansible Playbook. -# -# This file is part of CenturyLink Cloud, and is maintained -# by the Workflow as a Service Team -# -# Copyright 2015 CenturyLink Cloud -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# CenturyLink Cloud: http://www.CenturyLinkCloud.com -# API Documentation: https://www.centurylinkcloud.com/api-docs/v2/ -# - -DOCUMENTATION = ''' -module: clc_server -short_desciption: Create, Delete and Restore server snapshots in CenturyLink Cloud. -description: - - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud. -options: - server_ids: - description: - - A list of server Ids to snapshot. - default: [] - required: True - aliases: [] - expiration_days: - description: - - The number of days to keep the server snapshot before it expires. - default: 7 - required: False - aliases: [] - state: - description: - - The state to insure that the provided resources are in. - default: 'present' - required: False - choices: ['present', 'absent', 'restore'] - aliases: [] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - default: True - required: False - choices: [ True, False] - aliases: [] -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Create server snapshot - clc_server_snapshot: - server_ids: - - UC1WFSDTEST01 - - UC1WFSDTEST02 - expiration_days: 10 - wait: True - state: present - -- name: Restore server snapshot - clc_server_snapshot: - server_ids: - - UC1WFSDTEST01 - - UC1WFSDTEST02 - wait: True - state: restore - -- name: Delete server snapshot - clc_server_snapshot: - server_ids: - - UC1WFSDTEST01 - - UC1WFSDTEST02 - wait: True - state: absent -''' - -__version__ = '${version}' - -import requests - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - clc_found = False - clc_sdk = None -else: - CLC_FOUND = True - - -class ClcSnapshot(): - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - if not CLC_FOUND: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - if not CLC_FOUND: - self.module.fail_json( - msg='clc-python-sdk required for this module') - - server_ids = p['server_ids'] - expiration_days = p['expiration_days'] - state = p['state'] - - if not server_ids: - return self.module.fail_json(msg='List of Server ids are required') - - self._set_clc_credentials_from_env() - if state == 'present': - changed, requests, changed_servers = self.ensure_server_snapshot_present(server_ids=server_ids, - expiration_days=expiration_days) - elif state == 'absent': - changed, requests, changed_servers = self.ensure_server_snapshot_absent( - server_ids=server_ids) - elif state == 'restore': - changed, requests, changed_servers = self.ensure_server_snapshot_restore( - server_ids=server_ids) - else: - return self.module.fail_json(msg="Unknown State: " + state) - - self._wait_for_requests_to_complete(requests) - return self.module.exit_json( - changed=changed, - server_ids=changed_servers) - - def ensure_server_snapshot_present(self, server_ids, expiration_days): - """ - Ensures the given set of server_ids have the snapshots created - :param server_ids: The list of server_ids to create the snapshot - :param expiration_days: The number of days to keep the snapshot - :return: (changed, result, changed_servers) - changed: A flag indicating whether any change was made - result: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - result = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) == 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - res = server.CreateSnapshot( - delete_existing=True, - expiration_days=expiration_days) - result.append(res) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, result, changed_servers - - def ensure_server_snapshot_absent(self, server_ids): - """ - Ensures the given set of server_ids have the snapshots removed - :param server_ids: The list of server_ids to delete the snapshot - :return: (changed, result, changed_servers) - changed: A flag indicating whether any change was made - result: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - result = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) > 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - res = server.DeleteSnapshot() - result.append(res) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, result, changed_servers - - def 
ensure_server_snapshot_restore(self, server_ids): - """ - Ensures the given set of server_ids have the snapshots restored - :param server_ids: The list of server_ids to delete the snapshot - :return: (changed, result, changed_servers) - changed: A flag indicating whether any change was made - result: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - result = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) > 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - res = server.RestoreSnapshot() - result.append(res) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, result, changed_servers - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process server snapshot request') - - @staticmethod - def define_argument_spec(): - """ - This function defnines the dictionary object required for - package module - :return: the package dictionary object - """ - argument_spec = dict( - server_ids=dict(type='list', required=True), - expiration_days=dict(default=7), - wait=dict(default=True), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'restore']), - ) - return argument_spec - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param the list server ids - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - return self.module.fail_json(msg=message + ': %s' % ex) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - Main function - :return: None - """ - module = AnsibleModule( - argument_spec=ClcSnapshot.define_argument_spec(), - supports_check_mode=True - ) - clc_snapshot 
= ClcSnapshot(module) - clc_snapshot.process_request() - -from ansible.module_utils.basic import * -if __name__ == '__main__': - main() From 1ccb21bd182954a6802cf97451a5d17c5f57a17e Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 10 Jul 2015 17:36:20 +0200 Subject: [PATCH 130/157] cloudstack: cs_instance: fix missing resource error in check mode if instance is not yet present --- cloud/cloudstack/cs_instance.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 79b1c58a586..7c2c117604d 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -485,8 +485,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): instance = self.deploy_instance() else: instance = self.update_instance(instance) - - instance = self.ensure_tags(resource=instance, resource_type='UserVm') + + # In check mode, we do not necessarely have an instance + if instance: + instance = self.ensure_tags(resource=instance, resource_type='UserVm') return instance From 998bba4a211dcad5a50cef9f048ccf43aa1c2905 Mon Sep 17 00:00:00 2001 From: Boris Ekelchik Date: Mon, 13 Jul 2015 12:05:47 -0700 Subject: [PATCH 131/157] Added changes requested by reviewers Copied @wimnat incorporating changes requested in feedback comments --- cloud/amazon/sts_assume_role.py | 152 +++++++++++++++----------------- 1 file changed, 70 insertions(+), 82 deletions(-) diff --git a/cloud/amazon/sts_assume_role.py b/cloud/amazon/sts_assume_role.py index 7e02dbbd84e..7eec28b843a 100644 --- a/cloud/amazon/sts_assume_role.py +++ b/cloud/amazon/sts_assume_role.py @@ -17,75 +17,69 @@ DOCUMENTATION = ''' --- module: sts_assume_role -short_description: assume a role in AWS account and obtain temporary credentials. +short_description: Assume a role using AWS Security Token Service and obtain temporary credentials description: - - call AWS STS (Security Token Service) to assume a role in AWS account and obtain temporary credentials. This module has a dependency on python-boto. - For details on base AWS API reference http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html -version_added: "1.7" + - Assume a role using AWS Security Token Service and obtain temporary credentials +version_added: "2.0" +author: Boris Ekelchik (@bekelchik) options: role_arn: description: - The Amazon Resource Name (ARN) of the role that the caller is assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs) required: true - aliases: [] role_session_name: description: - Name of the role's session - will be used by CloudTrail required: true - aliases: [] policy: description: - Supplemental policy to use in addition to assumed role's policies. required: false default: null - aliases: [] duration_seconds: description: - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds. required: false default: null - aliases: [] external_id: description: - A unique identifier that is used by third parties to assume a role in their customers' accounts. required: false default: null - aliases: [] mfa_serial_number: description: - he identification number of the MFA device that is associated with the user who is making the AssumeRole call. required: false default: null - aliases: [] mfa_token: description: - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA. 
required: false default: null - aliases: [] - -author: Boris Ekelchik +notes: + - In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token extends_documentation_fragment: aws ''' EXAMPLES = ''' -# Basic example of assuming a role -tasks: -- name: assume a role in account 123456789012 - sts_assume_role: role_arn="arn:aws:iam::123456789012:role/someRole" session_name="someRoleSession" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) +sts_assume_role: + role_arn: "arn:aws:iam::123456789012:role/someRole" + session_name: "someRoleSession" +register: assumed_role + +# Use the assumed role above to tag an instance in account 123456789012 +ec2_tag: + aws_access_key: "{{ assumed_role.sts_creds.access_key }}" + aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}" + security_token: "{{ assumed_role.sts_creds.session_token }}" + resource: i-xyzxyz01 + state: present + tags: + MyNewTag: value -- name: display temporary credentials - debug: "temporary credentials for the assumed role are {{ ansible_temp_credentials }}" - -- name: use temporary credentials for tagging an instance in account 123456789012 - ec2_tag: resource=i-xyzxyz01 region=us-west-1 state=present - args: - aws_access_key: "{{ ansible_temp_credentials.access_key }}" - aws_secret_key: "{{ ansible_temp_credentials.secret_key }}" - security_token: "{{ ansible_temp_credentials.session_token }}" - - tags: - Test: value ''' import sys @@ -93,71 +87,65 @@ import time try: import boto.sts - + from boto.exception import BotoServerError + HAS_BOTO = True except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def sts_connect(module): - - """ Return an STS connection""" - - region, ec2_url, boto_params = get_aws_connection_info(module) - - # If we have a region specified, connect to its endpoint. 
- if region: - try: - sts = connect_to_aws(boto.sts, region, **boto_params) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - # Otherwise, no region so we fallback to connect_sts method - else: - try: - sts = boto.connect_sts(**boto_params) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - - - return sts + HAS_BOTO = False + -def assumeRole(): - data = sts.assume_role() - return data +def assume_role_policy(connection, module): + + role_arn = module.params.get('role_arn') + role_session_name = module.params.get('role_session_name') + policy = module.params.get('policy') + duration_seconds = module.params.get('duration_seconds') + external_id = module.params.get('external_id') + mfa_serial_number = module.params.get('mfa_serial_number') + mfa_token = module.params.get('mfa_token') + changed = False + + try: + assumed_role = connection.assume_role(role_arn, role_session_name, policy, duration_seconds, external_id, mfa_serial_number, mfa_token) + changed = True + except BotoServerError, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, sts_creds=assumed_role.credentials.__dict__, sts_user=assumed_role.user.__dict__) def main(): argument_spec = ec2_argument_spec() - argument_spec.update(dict( - role_arn = dict(required=True), - role_session_name = dict(required=True), - duraction_seconds = dict(), - external_id = dict(), - policy = dict(), - mfa_serial_number = dict(), - mfa_token = dict(), + argument_spec.update( + dict( + role_arn = dict(required=True, default=None), + role_session_name = dict(required=True, default=None), + duration_seconds = dict(required=False, default=None, type='int'), + external_id = dict(required=False, default=None), + policy = dict(required=False, default=None), + mfa_serial_number = dict(required=False, default=None), + mfa_token = dict(required=False, default=None) ) ) + module = AnsibleModule(argument_spec=argument_spec) - role_arn = module.params.get('role_arn') - role_session_name = module.params.get('role_session_name') - policy = module.params.get('policy') - duraction_seconds = module.params.get('duraction_seconds') - external_id = module.params.get('external_id') - mfa_serial_number = module.params.get('mfa_serial_number') - mfa_token = module.params.get('mfa_token') - - sts = sts_connect(module) + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') - temp_credentials = {} + region, ec2_url, aws_connect_params = get_aws_connection_info(module) - try: - temp_credentials = sts.assume_role(role_arn, role_session_name, policy, duraction_seconds, - external_id, mfa_serial_number, mfa_token).credentials.__dict__ - except boto.exception.BotoServerError, e: - module.fail_json(msg='Unable to assume role {0}, error: {1}'.format(role_arn, e)) - result = dict(changed=False, ansible_facts=dict(ansible_temp_credentials=temp_credentials)) + if region: + try: + connection = connect_to_aws(boto.sts, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") - module.exit_json(**result) + try: + assume_role_policy(connection, module) + except BotoServerError, e: + module.fail_json(msg=e) + # import module snippets from ansible.module_utils.basic import * From 3ac3f02c324daf80f4a9a5176e13998517fab9a3 Mon Sep 17 00:00:00 2001 From: Siva Popuri Date: Mon, 13 Jul 2015 15:52:00 -0500 Subject: [PATCH 132/157] changes to include PR review comments --- 
cloud/centurylink/clc_publicip.py | 66 +++++++++++++++++++++++++------ 1 file changed, 55 insertions(+), 11 deletions(-) diff --git a/cloud/centurylink/clc_publicip.py b/cloud/centurylink/clc_publicip.py index 2e525a51455..ed3228e1996 100644 --- a/cloud/centurylink/clc_publicip.py +++ b/cloud/centurylink/clc_publicip.py @@ -31,11 +31,13 @@ module: clc_publicip short_description: Add and Delete public ips on servers in CenturyLink Cloud. description: - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud. +version_added: 1.0 options: protocol: descirption: - The protocol that the public IP will listen for. default: TCP + choices: ['TCP', 'UDP', 'ICMP'] required: False ports: description: @@ -58,6 +60,20 @@ options: choices: [ True, False ] default: True required: False +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME: the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD: the account passwod for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN: the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS: the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. ''' EXAMPLES = ''' @@ -101,7 +117,14 @@ EXAMPLES = ''' __version__ = '${version}' -import requests +from distutils.version import LooseVersion + +try: + import requests +except ImportError: + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True # # Requires the clc-python-sdk. @@ -130,6 +153,12 @@ class ClcPublicIp(object): if not CLC_FOUND: self.module.fail_json( msg='clc-python-sdk required for this module') + if not REQUESTS_FOUND: + self.module.fail_json( + msg='requests library is required for this module') + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') self._set_user_agent(self.clc) @@ -169,8 +198,8 @@ class ClcPublicIp(object): """ argument_spec = dict( server_ids=dict(type='list', required=True), - protocol=dict(default='TCP'), - ports=dict(type='list'), + protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']), + ports=dict(type='list', required=True), wait=dict(type='bool', default=True), state=dict(default='present', choices=['present', 'absent']), ) @@ -200,12 +229,22 @@ class ClcPublicIp(object): for port in ports] for server in servers_to_change: if not self.module.check_mode: - result = server.PublicIPs().Add(ports_to_expose) + result = self._add_publicip_to_server(server, ports_to_expose) results.append(result) changed_server_ids.append(server.id) changed = True return changed, changed_server_ids, results + def _add_publicip_to_server(self, server, ports_to_expose): + result = None + try: + result = server.PublicIPs().Add(ports_to_expose) + except CLCException, ex: + self.module.fail_json(msg='Failed to add public ip to the server : {0}. 
{1}'.format( + server.id, ex.response_text + )) + return result + def ensure_public_ip_absent(self, server_ids): """ Ensures the given server ids having the public ip removed if there is any @@ -224,19 +263,24 @@ class ClcPublicIp(object): servers_to_change = [ server for server in servers if len( server.PublicIPs().public_ips) > 0] - ips_to_delete = [] - for server in servers_to_change: - for ip_address in server.PublicIPs().public_ips: - ips_to_delete.append(ip_address) for server in servers_to_change: if not self.module.check_mode: - for ip in ips_to_delete: - result = ip.Delete() - results.append(result) + result = self._remove_publicip_from_server(server) + results.append(result) changed_server_ids.append(server.id) changed = True return changed, changed_server_ids, results + def _remove_publicip_from_server(self, server): + try: + for ip_address in server.PublicIPs().public_ips: + result = ip_address.Delete() + except CLCException, ex: + self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format( + server.id, ex.response_text + )) + return result + def _wait_for_requests_to_complete(self, requests_lst): """ Waits until the CLC requests are complete if the wait argument is True From ed5c623e7c1fa69d7c9570194e4d7612d957029c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Jul 2015 22:25:52 -0400 Subject: [PATCH 133/157] added placeholder to be used on build (TODO: update makefile) --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 53adb84c822..ee36851a03e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.8.2 +${version} From ff2386faf49dd44964fac084ed7199ab4ea5f741 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 14 Jul 2015 07:30:41 -0700 Subject: [PATCH 134/157] Tabs to spaces Fixes #666 --- packaging/language/bundler.py | 144 +++++++++++++++++----------------- 1 file changed, 72 insertions(+), 72 deletions(-) diff --git a/packaging/language/bundler.py b/packaging/language/bundler.py index e98350a7b70..f4aeff4156f 100644 --- a/packaging/language/bundler.py +++ b/packaging/language/bundler.py @@ -129,81 +129,81 @@ EXAMPLES=''' def get_bundler_executable(module): - if module.params.get('executable'): - return module.params.get('executable').split(' ') - else: - return [ module.get_bin_path('bundle', True) ] + if module.params.get('executable'): + return module.params.get('executable').split(' ') + else: + return [ module.get_bin_path('bundle', True) ] def main(): - module = AnsibleModule( - argument_spec=dict( - executable=dict(default=None, required=False), - state=dict(default='present', required=False, choices=['present', 'latest']), - chdir=dict(default=None, required=False), - exclude_groups=dict(default=None, required=False, type='list'), - clean=dict(default=False, required=False, type='bool'), - gemfile=dict(default=None, required=False), - local=dict(default=False, required=False, type='bool'), - deployment_mode=dict(default=False, required=False, type='bool'), - user_install=dict(default=True, required=False, type='bool'), - gem_path=dict(default=None, required=False), - binstub_directory=dict(default=None, required=False), - extra_args=dict(default=None, required=False), - ), - supports_check_mode=True - ) - - executable = module.params.get('executable') - state = module.params.get('state') - chdir = module.params.get('chdir') - exclude_groups = module.params.get('exclude_groups') - clean = module.params.get('clean') - gemfile = module.params.get('gemfile') - local = 
module.params.get('local') - deployment_mode = module.params.get('deployment_mode') - user_install = module.params.get('user_install') - gem_path = module.params.get('gem_install_path') - binstub_directory = module.params.get('binstub_directory') - extra_args = module.params.get('extra_args') - - cmd = get_bundler_executable(module) - - if module.check_mode: - cmd.append('check') - rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False) - - module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err) - - if state == 'present': - cmd.append('install') - if exclude_groups: - cmd.extend(['--without', ':'.join(exclude_groups)]) - if clean: - cmd.append('--clean') - if gemfile: - cmd.extend(['--gemfile', gemfile]) - if local: - cmd.append('--local') - if deployment_mode: - cmd.append('--deployment') - if not user_install: - cmd.append('--system') - if gem_path: - cmd.extend(['--path', gem_path]) - if binstub_directory: - cmd.extend(['--binstubs', binstub_directory]) - else: - cmd.append('update') - if local: - cmd.append('--local') - - if extra_args: - cmd.extend(extra_args.split(' ')) - - rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True) - - module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err) + module = AnsibleModule( + argument_spec=dict( + executable=dict(default=None, required=False), + state=dict(default='present', required=False, choices=['present', 'latest']), + chdir=dict(default=None, required=False), + exclude_groups=dict(default=None, required=False, type='list'), + clean=dict(default=False, required=False, type='bool'), + gemfile=dict(default=None, required=False), + local=dict(default=False, required=False, type='bool'), + deployment_mode=dict(default=False, required=False, type='bool'), + user_install=dict(default=True, required=False, type='bool'), + gem_path=dict(default=None, required=False), + binstub_directory=dict(default=None, required=False), + extra_args=dict(default=None, required=False), + ), + supports_check_mode=True + ) + + executable = module.params.get('executable') + state = module.params.get('state') + chdir = module.params.get('chdir') + exclude_groups = module.params.get('exclude_groups') + clean = module.params.get('clean') + gemfile = module.params.get('gemfile') + local = module.params.get('local') + deployment_mode = module.params.get('deployment_mode') + user_install = module.params.get('user_install') + gem_path = module.params.get('gem_install_path') + binstub_directory = module.params.get('binstub_directory') + extra_args = module.params.get('extra_args') + + cmd = get_bundler_executable(module) + + if module.check_mode: + cmd.append('check') + rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False) + + module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err) + + if state == 'present': + cmd.append('install') + if exclude_groups: + cmd.extend(['--without', ':'.join(exclude_groups)]) + if clean: + cmd.append('--clean') + if gemfile: + cmd.extend(['--gemfile', gemfile]) + if local: + cmd.append('--local') + if deployment_mode: + cmd.append('--deployment') + if not user_install: + cmd.append('--system') + if gem_path: + cmd.extend(['--path', gem_path]) + if binstub_directory: + cmd.extend(['--binstubs', binstub_directory]) + else: + cmd.append('update') + if local: + cmd.append('--local') + + if extra_args: + cmd.extend(extra_args.split(' ')) + + rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True) + + module.exit_json(changed='Installing' in out, 
state=state, stdout=out, stderr=err) from ansible.module_utils.basic import * From 8a41108b2b0f25f709ee7939f918550a44b81fe4 Mon Sep 17 00:00:00 2001 From: Siva Popuri Date: Tue, 14 Jul 2015 16:36:50 -0500 Subject: [PATCH 135/157] corrected the license string to make it compatible with GPLV3 --- cloud/centurylink/clc_publicip.py | 31 ++++++++++++------------------- 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/cloud/centurylink/clc_publicip.py b/cloud/centurylink/clc_publicip.py index ed3228e1996..77632c1cbfe 100644 --- a/cloud/centurylink/clc_publicip.py +++ b/cloud/centurylink/clc_publicip.py @@ -1,29 +1,22 @@ #!/usr/bin/python -# CenturyLink Cloud Ansible Modules. # -# These Ansible modules enable the CenturyLink Cloud v2 API to be called -# from an within Ansible Playbook. +# Copyright (c) 2015 CenturyLink # -# This file is part of CenturyLink Cloud, and is maintained -# by the Workflow as a Service Team +# This file is part of Ansible. # -# Copyright 2015 CenturyLink Cloud +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. # -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# CenturyLink Cloud: http://www.CenturyLinkCloud.com -# API Documentation: https://www.centurylinkcloud.com/api-docs/v2/ +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see # DOCUMENTATION = ''' From 4e140bb80e15b85da3ff724f30c5d4342e0dd544 Mon Sep 17 00:00:00 2001 From: Mathew Davies Date: Sun, 11 Jan 2015 16:42:45 +0000 Subject: [PATCH 136/157] Add Elasticsearch plugin module --- packaging/elasticsearch_plugin.py | 160 ++++++++++++++++++++++++++++++ 1 file changed, 160 insertions(+) create mode 100644 packaging/elasticsearch_plugin.py diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py new file mode 100644 index 00000000000..38303686e8d --- /dev/null +++ b/packaging/elasticsearch_plugin.py @@ -0,0 +1,160 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +import os + +from ansible.module_utils.basic import * + +""" +Ansible module to manage elasticsearch plugins +(c) 2015, Mathew Davies + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+You should have received a copy of the GNU General Public License +along with Ansible. If not, see . +""" + +DOCUMENTATION = ''' +--- +module: elasticsearch_plugin +short_description: Manage Elasticsearch plugins +description: + - Manages Elasticsearch plugins. +version_added: "" +author: Mathew Davies (@ThePixelDeveloper) +options: + name: + description: + - Name of the plugin to install + required: True + state: + description: + - Desired state of a plugin. + required: False + choices: [present, absent] + default: present + url: + description: + - Set exact URL to download the plugin from + required: False + timeout: + description: + - Timeout setting: 30s, 1m, 1h... (1m by default) + required: False + plugin_bin: + description: + - Location of the plugin binary + required: False + default: /usr/share/elasticsearch/bin/plugin + plugin_dir: + description: + - Your configured plugin directory specified in Elasticsearch + required: False + default: /usr/share/elasticsearch/plugins/ + version: + description: + - Version of the plugin to be installed. +''' + +EXAMPLES = ''' +# Install Elasticsearch head plugin +- elasticsearch_plugin: state=present name="mobz/elasticsearch-head" +''' + + +def parse_plugin_repo(string): + elements = string.split("/") + + # We first consider the simplest form: pluginname + repo = elements[0] + + # We consider the form: username/pluginname + if len(elements) > 1: + repo = elements[1] + + # remove elasticsearch- prefix + # remove es- prefix + for string in ("elasticsearch-", "es-"): + if repo.startswith(string): + return repo[len(string):] + + return repo + + +def is_plugin_present(plugin_dir, working_dir): + return os.path.isdir(os.path.join(working_dir, plugin_dir)) + + +def parse_error(string): + reason = "reason: " + return string[string.index(reason) + len(reason):].strip() + + +def main(): + + package_state_map = dict( + present="--install", + absent="--remove" + ) + + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(default="present", choices=package_state_map.keys()), + url=dict(default=None), + timeout=dict(default="1m"), + plugin_bin=dict(default="/usr/share/elasticsearch/bin/plugin"), + plugin_dir=dict(default="/usr/share/elasticsearch/plugins/"), + version=dict(default=None) + ) + ) + + plugin_bin = module.params["plugin_bin"] + plugin_dir = module.params["plugin_dir"] + name = module.params["name"] + state = module.params["state"] + url = module.params["url"] + timeout = module.params["timeout"] + version = module.params["version"] + + present = is_plugin_present(parse_plugin_repo(name), plugin_dir) + + print state + + # skip if the state is correct + if (present and state == "present") or (state == "absent" and not present): + module.exit_json(changed=False, name=name) + + if (version): + name = name + '/' + version + + cmd_args = [plugin_bin, package_state_map[state], name] + + if url: + cmd_args.append("--url %s" % url) + + if timeout: + cmd_args.append("--timeout %s" % timeout) + + cmd = " ".join(cmd_args) + + rc, out, err = module.run_command(cmd) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + module.exit_json(changed=True, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) + +if __name__ == "__main__": + main() From ebbe84b2d6fda94f3d40c7bbbd95ba48bfddb65d Mon Sep 17 00:00:00 2001 From: Mathew Davies Date: Thu, 16 Jul 2015 20:38:58 +0100 Subject: [PATCH 137/157] Document defaults --- packaging/elasticsearch_plugin.py | 2 ++ 1 file changed, 2 
insertions(+) diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py index 38303686e8d..818ebb00484 100644 --- a/packaging/elasticsearch_plugin.py +++ b/packaging/elasticsearch_plugin.py @@ -47,6 +47,7 @@ options: description: - Set exact URL to download the plugin from required: False + default: None timeout: description: - Timeout setting: 30s, 1m, 1h... (1m by default) @@ -64,6 +65,7 @@ options: version: description: - Version of the plugin to be installed. + default: None ''' EXAMPLES = ''' From 93e59297f0f71e85d18482aa2abe1733720c5d66 Mon Sep 17 00:00:00 2001 From: Mathew Davies Date: Thu, 16 Jul 2015 20:55:50 +0100 Subject: [PATCH 138/157] Remove debugging line --- packaging/elasticsearch_plugin.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py index 818ebb00484..3c0d6124e82 100644 --- a/packaging/elasticsearch_plugin.py +++ b/packaging/elasticsearch_plugin.py @@ -130,8 +130,6 @@ def main(): version = module.params["version"] present = is_plugin_present(parse_plugin_repo(name), plugin_dir) - - print state # skip if the state is correct if (present and state == "present") or (state == "absent" and not present): From 045f0908e2002a98e8b50200530a32872dcb87d5 Mon Sep 17 00:00:00 2001 From: Mathew Davies Date: Thu, 16 Jul 2015 20:56:05 +0100 Subject: [PATCH 139/157] Add required property to version documentation --- packaging/elasticsearch_plugin.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py index 3c0d6124e82..15821eb6adc 100644 --- a/packaging/elasticsearch_plugin.py +++ b/packaging/elasticsearch_plugin.py @@ -65,6 +65,7 @@ options: version: description: - Version of the plugin to be installed. + required: False default: None ''' From 394053ff2bab918d905a44ba11704aa0ebf39124 Mon Sep 17 00:00:00 2001 From: Mathew Davies Date: Thu, 16 Jul 2015 20:56:45 +0100 Subject: [PATCH 140/157] Add default documentation for timeout --- packaging/elasticsearch_plugin.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py index 15821eb6adc..4838d478ef4 100644 --- a/packaging/elasticsearch_plugin.py +++ b/packaging/elasticsearch_plugin.py @@ -50,8 +50,9 @@ options: default: None timeout: description: - - Timeout setting: 30s, 1m, 1h... (1m by default) + - Timeout setting: 30s, 1m, 1h... 
required: False + default: 1m plugin_bin: description: - Location of the plugin binary From 6fa1809ec4007faf623b955e8a811e87aa87c3b9 Mon Sep 17 00:00:00 2001 From: Mathew Davies Date: Thu, 16 Jul 2015 21:00:15 +0100 Subject: [PATCH 141/157] Move ansible util import to the bottom of the module --- packaging/elasticsearch_plugin.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py index 4838d478ef4..34b028accaf 100644 --- a/packaging/elasticsearch_plugin.py +++ b/packaging/elasticsearch_plugin.py @@ -3,8 +3,6 @@ import os -from ansible.module_utils.basic import * - """ Ansible module to manage elasticsearch plugins (c) 2015, Mathew Davies @@ -158,5 +156,6 @@ def main(): module.exit_json(changed=True, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) -if __name__ == "__main__": - main() +from ansible.module_utils.basic import * + +main() From fb42f6effcbed7980ff9c1db9a5f85ffa3d1183e Mon Sep 17 00:00:00 2001 From: Mathew Davies Date: Thu, 16 Jul 2015 21:12:42 +0100 Subject: [PATCH 142/157] Note that the plugin can't be updated once installed --- packaging/elasticsearch_plugin.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py index 34b028accaf..c263388b9e6 100644 --- a/packaging/elasticsearch_plugin.py +++ b/packaging/elasticsearch_plugin.py @@ -63,7 +63,8 @@ options: default: /usr/share/elasticsearch/plugins/ version: description: - - Version of the plugin to be installed. + - Version of the plugin to be installed. + If plugin exists with previous version, it will NOT be updated required: False default: None ''' From 2d2ea412aeef207af26906bd78a30b29486dbfd9 Mon Sep 17 00:00:00 2001 From: Mathew Davies Date: Thu, 16 Jul 2015 21:15:15 +0100 Subject: [PATCH 143/157] Add more examples --- packaging/elasticsearch_plugin.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py index c263388b9e6..f1053144e12 100644 --- a/packaging/elasticsearch_plugin.py +++ b/packaging/elasticsearch_plugin.py @@ -72,6 +72,12 @@ options: EXAMPLES = ''' # Install Elasticsearch head plugin - elasticsearch_plugin: state=present name="mobz/elasticsearch-head" + +# Install specific version of a plugin +- elasticsearch_plugin: state=present name="com.github.kzwang/elasticsearch-image" version="1.2.0" + +# Uninstall Elasticsearch head plugin +- elasticsearch_plugin: state=absent name="mobz/elasticsearch-head" ''' From 7b2f2b766799e179c0220aff92b0464e6782a927 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 17:55:20 -0400 Subject: [PATCH 144/157] added version added --- packaging/elasticsearch_plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py index f1053144e12..6cddd8643c3 100644 --- a/packaging/elasticsearch_plugin.py +++ b/packaging/elasticsearch_plugin.py @@ -28,7 +28,7 @@ module: elasticsearch_plugin short_description: Manage Elasticsearch plugins description: - Manages Elasticsearch plugins. 
-version_added: "" +version_added: "2.0" author: Mathew Davies (@ThePixelDeveloper) options: name: From bbc0f853d06fa2c28b097723e7f9bc92a9ba8107 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 18:02:42 -0400 Subject: [PATCH 145/157] minor doc fixes --- packaging/elasticsearch_plugin.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py index 6cddd8643c3..7b092a13667 100644 --- a/packaging/elasticsearch_plugin.py +++ b/packaging/elasticsearch_plugin.py @@ -48,7 +48,7 @@ options: default: None timeout: description: - - Timeout setting: 30s, 1m, 1h... + - "Timeout setting: 30s, 1m, 1h..." required: False default: 1m plugin_bin: @@ -62,8 +62,8 @@ options: required: False default: /usr/share/elasticsearch/plugins/ version: - description: - - Version of the plugin to be installed. + description: + - Version of the plugin to be installed. If plugin exists with previous version, it will NOT be updated required: False default: None @@ -141,7 +141,7 @@ def main(): # skip if the state is correct if (present and state == "present") or (state == "absent" and not present): module.exit_json(changed=False, name=name) - + if (version): name = name + '/' + version From 2754157d87d0c3db2c769a68fb0fa63f2dd53611 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 17 Jul 2015 00:48:33 -0400 Subject: [PATCH 146/157] minor doc fixes --- windows/win_unzip.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/windows/win_unzip.py b/windows/win_unzip.py index 7c5ac322b97..799ab1bda31 100644 --- a/windows/win_unzip.py +++ b/windows/win_unzip.py @@ -24,7 +24,7 @@ DOCUMENTATION = ''' --- module: win_unzip -version_added: "" +version_added: "2.0" short_description: Unzips compressed files on the Windows node description: - Unzips compressed files, and can force reboot (if needed, i.e. such as hotfixes). Has ability to recursively unzip files within the src zip file provided using Read-Archive and piping to Expand-Archive (Using PSCX). If the destination directory does not exist, it will be created before unzipping the file. If a .zip file is specified as src and recurse is true then PSCX will be installed. Specifying rm parameter will allow removal of the src file after extraction. @@ -33,14 +33,10 @@ options: description: - File to be unzipped (provide absolute path) required: true - default: null - aliases: [] dest: description: - Destination of zip file (provide absolute path of directory). If it does not exist, the directory will be created. required: true - default: null - aliases: [] rm: description: - Remove the zip file, after unzipping @@ -51,7 +47,6 @@ options: - yes - no default: false - aliases: [] recurse: description: - Recursively expand zipped files within the src file. @@ -62,14 +57,12 @@ options: - false - yes - no - aliases: [] creates: description: - If this file or directory exists the specified src will not be extracted. 
required: no default: null - aliases: [] -author: Phil Schwartz +author: Phil Schwartz ''' EXAMPLES = ''' From cc305adfb672283d91e5e03775dd4f512350b65e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 17 Jul 2015 00:51:08 -0400 Subject: [PATCH 147/157] minor doc fixes --- windows/win_iis_virtualdirectory.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/windows/win_iis_virtualdirectory.py b/windows/win_iis_virtualdirectory.py index e5bbd950007..1ccb34a65d3 100644 --- a/windows/win_iis_virtualdirectory.py +++ b/windows/win_iis_virtualdirectory.py @@ -30,8 +30,6 @@ options: description: - The name of the virtual directory to create or remove required: true - default: null - aliases: [] state: description: - Whether to add or remove the specified virtual directory @@ -40,28 +38,20 @@ options: - present required: false default: null - aliases: [] site: description: - The site name under which the virtual directory is created or exists. required: false default: null - aliases: [] application: description: - The application under which the virtual directory is created or exists. required: false default: null - aliases: [] physical_path: description: - The physical path to the folder in which the new virtual directory is created. The specified folder must already exist. required: false default: null - aliases: [] author: Henrik Wallström ''' - -EXAMPLES = ''' - -''' From cf764bf0604ead05fda0457b3342d556b4ef4807 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 17 Jul 2015 01:19:21 -0400 Subject: [PATCH 148/157] minor doc fixes --- cloud/amazon/cloudtrail.py | 2 +- clustering/consul.py | 41 +++++++++++++++++++------------------- clustering/consul_kv.py | 10 +++++----- 3 files changed, 26 insertions(+), 27 deletions(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index 1c9313bbf7b..557f2ebaae3 100644 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -21,7 +21,7 @@ short_description: manage CloudTrail creation and deletion description: - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. version_added: "2.0" -author: +author: - "Ansible Core Team" - "Ted Timmons" requirements: diff --git a/clustering/consul.py b/clustering/consul.py index 083173230f7..116517571a5 100644 --- a/clustering/consul.py +++ b/clustering/consul.py @@ -19,30 +19,30 @@ DOCUMENTATION = """ module: consul -short_description: "Add, modify & delete services within a consul cluster. - See http://consul.io for more details." +short_description: "Add, modify & delete services within a consul cluster." description: - - registers services and checks for an agent with a consul cluster. A service - is some process running on the agent node that should be advertised by + - Registers services and checks for an agent with a consul cluster. + A service is some process running on the agent node that should be advertised by consul's discovery mechanism. It may optionally supply a check definition, a periodic service test to notify the consul cluster of service's health. - Checks may also be registered per node e.g. disk usage, or cpu usage and + - "Checks may also be registered per node e.g. disk usage, or cpu usage and notify the health of the entire node to the cluster. Service level checks do not require a check name or id as these are derived - by Consul from the Service name and id respectively by appending 'service:'. - Node level checks require a check_name and optionally a check_id. 
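
The reworked consul documentation spells out a duration convention for check parameters such as interval and ttl just below: a number with an "s" or "m" suffix, with minutes assumed when no suffix is given (so 1 means 1m). A small self-contained illustration of that rule (the helper is hand-written for this note, not part of the module):

    # Sketch of the documented suffix convention for consul check durations.
    def normalize_duration(value, default_unit="m"):
        value = str(value)
        if value and value[-1] in ("s", "m"):
            return value
        return value + default_unit

    assert normalize_duration(1) == "1m"       # bare numbers default to minutes
    assert normalize_duration("15s") == "15s"  # explicit suffixes pass through
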
- Currently, there is no complete way to retrieve the script, interval or ttl + by Consul from the Service name and id respectively by appending 'service:' + Node level checks require a check_name and optionally a check_id." + - Currently, there is no complete way to retrieve the script, interval or ttl metadata for a registered check. Without this metadata it is not possible to - tell if the data supplied with ansible represents a change to a check. As a - result this does not attempt to determine changes and will always report a + tell if the data supplied with ansible represents a change to a check. As a + result this does not attempt to determine changes and will always report a changed occurred. An api method is planned to supply this metadata so at that stage change management will be added. + - "See http://consul.io for more details." requirements: - "python >= 2.6" - python-consul - requests version_added: "2.0" -author: "Steve Gargan (@sgargan)" +author: "Steve Gargan (@sgargan)" options: state: description: @@ -50,7 +50,7 @@ options: required: true choices: ['present', 'absent'] service_name: - desciption: + description: - Unique name for the service on a node, must be unique per node, required if registering a service. May be ommitted if registering a node level check @@ -95,11 +95,11 @@ options: interval: description: - the interval at which the service check will be run. This is a number - with a s or m suffix to signify the units of seconds or minutes e.g - 15s or 1m. If no suffix is supplied, m will be used by default e.g. + with a s or m suffix to signify the units of seconds or minutes e.g + 15s or 1m. If no suffix is supplied, m will be used by default e.g. 1 will be 1m. Required if the script param is specified. required: false - default: None + default: None check_id: description: - an ID for the service check, defaults to the check name, ignored if @@ -113,20 +113,19 @@ options: required: false default: None ttl: - description: + description: - checks can be registered with a ttl instead of a script and interval this means that the service will check in with the agent before the - ttl expires. If it doesn't the check will be considered failed. + ttl expires. If it doesn't the check will be considered failed. Required if registering a check and the script an interval are missing - Similar to the interval this is a number with a s or m suffix to - signify the units of seconds or minutes e.g 15s or 1m. If no suffix + Similar to the interval this is a number with a s or m suffix to + signify the units of seconds or minutes e.g 15s or 1m. If no suffix is supplied, m will be used by default e.g. 1 will be 1m required: false default: None token: description: - - the token key indentifying an ACL rule set. May be required to - register services. + - the token key indentifying an ACL rule set. May be required to register services. required: false default: None """ diff --git a/clustering/consul_kv.py b/clustering/consul_kv.py index 2ba3a0315a3..b0d07dda83a 100644 --- a/clustering/consul_kv.py +++ b/clustering/consul_kv.py @@ -19,14 +19,14 @@ DOCUMENTATION = """ module: consul_kv -short_description: "manipulate entries in the key/value store of a consul - cluster. See http://www.consul.io/docs/agent/http.html#kv for more details." +short_description: Manipulate entries in the key/value store of a consul cluster. 
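
consul_kv drives these key/value operations through the python-consul client listed in its requirements. For orientation, this is roughly what a get/put/delete against a local agent looks like with that library; the host, port and key below are placeholders, and this is a hand-written illustration rather than code taken from the module:

    import consul

    c = consul.Consul(host="127.0.0.1", port=8500)
    c.kv.put("ansible/example", "hello")         # add or modify an entry
    index, entry = c.kv.get("ansible/example")   # entry carries flags, session, value
    print(entry["Value"])                        # raw value bytes, e.g. b'hello'
    c.kv.delete("ansible/example")               # remove it again
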
description: - - allows the addition, modification and deletion of key/value entries in a + - Allows the addition, modification and deletion of key/value entries in a consul cluster via the agent. The entire contents of the record, including - the indices, flags and session are returned as 'value'. If the key - represents a prefix then Note that when a value is removed, the existing + the indices, flags and session are returned as 'value'. + - If the key represents a prefix then Note that when a value is removed, the existing value if any is returned as part of the results. + - "See http://www.consul.io/docs/agent/http.html#kv for more details." requirements: - "python >= 2.6" - python-consul From dd4d33b1fe959b57f96ed65ea642a258359cb846 Mon Sep 17 00:00:00 2001 From: Rob White Date: Mon, 6 Jul 2015 19:46:33 +1000 Subject: [PATCH 149/157] New module - s3_logging --- cloud/amazon/s3_logging.py | 185 +++++++++++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) create mode 100644 cloud/amazon/s3_logging.py diff --git a/cloud/amazon/s3_logging.py b/cloud/amazon/s3_logging.py new file mode 100644 index 00000000000..313518510c9 --- /dev/null +++ b/cloud/amazon/s3_logging.py @@ -0,0 +1,185 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +DOCUMENTATION = ''' +--- +module: s3_logging +short_description: Manage logging facility of an s3 bucket in AWS +description: + - Manage logging facility of an s3 bucket in AWS +version_added: "2.0" +author: Rob White (@wimnat) +options: + name: + description: + - "Name of the s3 bucket." + required: true + default: null + region: + description: + - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard." + required: false + default: null + state: + description: + - "Enable or disable logging." + required: false + default: present + choices: [ 'present', 'absent' ] + target_bucket: + description: + - "The bucket to log to." + required: true + default: null + target_prefix: + description: + - "The prefix that should be prepended to the generated log files written to the target_bucket." + required: false + default: no + +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
+ +- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs + s3_logging: + name: mywebsite.com + target_bucket: mylogs + target_prefix: logs/mywebsite.com + state: present + +- name: Remove logging on an s3 bucket + s3_logging: + name: mywebsite.com + state: absent + +''' + +try: + import boto.ec2 + from boto.s3.connection import OrdinaryCallingFormat, Location + from boto.exception import BotoServerError, S3CreateError, S3ResponseError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +def compare_bucket_logging(bucket, target_bucket, target_prefix): + + bucket_log_obj = bucket.get_logging_status() + if bucket_log_obj.target != target_bucket or bucket_log_obj.prefix != target_prefix: + return False + else: + return True + + +def enable_bucket_logging(connection, module): + + bucket_name = module.params.get("name") + target_bucket = module.params.get("target_bucket") + target_prefix = module.params.get("target_prefix") + changed = False + + try: + bucket = connection.get_bucket(bucket_name) + except S3ResponseError as e: + module.fail_json(msg=e.message) + + try: + if not compare_bucket_logging(bucket, target_bucket, target_prefix): + # Before we can enable logging we must give the log-delivery group WRITE and READ_ACP permissions to the target bucket + try: + target_bucket_obj = connection.get_bucket(target_bucket) + except S3ResponseError as e: + if e.status == 301: + module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged") + else: + module.fail_json(msg=e.message) + target_bucket_obj.set_as_logging_target() + + bucket.enable_logging(target_bucket, target_prefix) + changed = True + + except S3ResponseError as e: + module.fail_json(msg=e.message) + + module.exit_json(changed=changed) + + +def disable_bucket_logging(connection, module): + + bucket_name = module.params.get("name") + changed = False + + try: + bucket = connection.get_bucket(bucket_name) + if not compare_bucket_logging(bucket, None, None): + bucket.disable_logging() + changed = True + except S3ResponseError as e: + module.fail_json(msg=e.message) + + module.exit_json(changed=changed) + + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name = dict(required=True, default=None), + target_bucket = dict(required=True, default=None), + target_prefix = dict(required=False, default=""), + state = dict(required=False, default='present', choices=['present', 'absent']) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region in ('us-east-1', '', None): + # S3ism for the US Standard region + location = Location.DEFAULT + else: + # Boto uses symbolic names for locations but region strings will + # actually work fine for everything except us-east-1 (US Standard) + location = region + try: + connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params) + # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases + if connection is None: + connection = boto.connect_s3(**aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + + + state = module.params.get("state") + + if state == 'present': + enable_bucket_logging(connection, module) + elif state == 'absent': + 
disable_bucket_logging(connection, module) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() \ No newline at end of file From b598161f4646678f419d4afd3a651019d7e4c895 Mon Sep 17 00:00:00 2001 From: Phil Date: Fri, 17 Jul 2015 22:14:18 -0500 Subject: [PATCH 150/157] minor doc fixes that had lingering description of deprecated functions --- windows/win_unzip.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/windows/win_unzip.py b/windows/win_unzip.py index 799ab1bda31..aa0180baf74 100644 --- a/windows/win_unzip.py +++ b/windows/win_unzip.py @@ -25,9 +25,9 @@ DOCUMENTATION = ''' --- module: win_unzip version_added: "2.0" -short_description: Unzips compressed files on the Windows node +short_description: Unzips compressed files and archives on the Windows node description: - - Unzips compressed files, and can force reboot (if needed, i.e. such as hotfixes). Has ability to recursively unzip files within the src zip file provided using Read-Archive and piping to Expand-Archive (Using PSCX). If the destination directory does not exist, it will be created before unzipping the file. If a .zip file is specified as src and recurse is true then PSCX will be installed. Specifying rm parameter will allow removal of the src file after extraction. + - Unzips compressed files and archives. For extracting any compression types other than .zip, the PowerShellCommunityExtensions (PSCX) Module is required. This module (in conjunction with PSCX) has the ability to recursively unzip files within the src zip file provided and also functionality for many other compression types. If the destination directory does not exist, it will be created before unzipping the file. Specifying rm parameter will force removal of the src file after extraction. options: src: description: From ebb91255670c4e02aadc3defcea6a09fd85b8726 Mon Sep 17 00:00:00 2001 From: Maciej Delmanowski Date: Sat, 18 Jul 2015 13:54:30 +0200 Subject: [PATCH 151/157] virt: remove BabyJSON Removed the usage of baby json. This is in response to the fact that the baby json functionality was removed in Ansible 1.8 Ref: #430 --- cloud/misc/virt.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cloud/misc/virt.py b/cloud/misc/virt.py index 80b8e2558eb..b59c7ed3de3 100644 --- a/cloud/misc/virt.py +++ b/cloud/misc/virt.py @@ -93,8 +93,9 @@ import sys try: import libvirt except ImportError: - print "failed=True msg='libvirt python module unavailable'" - sys.exit(1) + HAS_VIRT = False +else: + HAS_VIRT = True ALL_COMMANDS = [] VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause', @@ -481,6 +482,11 @@ def main(): xml = dict(), )) + if not HAS_VIRT: + module.fail_json( + msg='The `libvirt` module is not importable. Check the requirements.' + ) + rc = VIRT_SUCCESS try: rc, result = core(module) From c791282c95a3c5905456450ddba7f83f427ab0d0 Mon Sep 17 00:00:00 2001 From: Rob White Date: Sun, 19 Jul 2015 11:10:48 +1000 Subject: [PATCH 152/157] Removed requirement for target_bucket --- cloud/amazon/s3_logging.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/s3_logging.py b/cloud/amazon/s3_logging.py index 313518510c9..fc199c500dd 100644 --- a/cloud/amazon/s3_logging.py +++ b/cloud/amazon/s3_logging.py @@ -40,8 +40,8 @@ options: choices: [ 'present', 'absent' ] target_bucket: description: - - "The bucket to log to." - required: true + - "The bucket to log to. Required when state=present." 
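
The virt change above (removing BabyJSON) replaces the old import-time failure, which printed failed=True and exited, with a deferred check: record whether libvirt imported, then fail from inside main() via fail_json. Stripped of the Ansible plumbing, the pattern amounts to this sketch (the fail callback stands in for module.fail_json):

    # Guarded optional import: never abort at import time, report the missing
    # dependency only when the module actually runs.
    try:
        import libvirt  # optional dependency
    except ImportError:
        HAS_VIRT = False
    else:
        HAS_VIRT = True

    def require_libvirt(fail):
        if not HAS_VIRT:
            fail(msg="The `libvirt` module is not importable. Check the requirements.")
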
+ required: false default: null target_prefix: description: @@ -142,7 +142,7 @@ def main(): argument_spec.update( dict( name = dict(required=True, default=None), - target_bucket = dict(required=True, default=None), + target_bucket = dict(required=False, default=None), target_prefix = dict(required=False, default=""), state = dict(required=False, default='present', choices=['present', 'absent']) ) From 9fb2eae7ddba6be8aa0d71f4ee3d983ef177c741 Mon Sep 17 00:00:00 2001 From: Rob White Date: Sun, 19 Jul 2015 13:43:04 +1000 Subject: [PATCH 153/157] Doc fixup --- cloud/amazon/s3_logging.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/s3_logging.py b/cloud/amazon/s3_logging.py index fc199c500dd..75b3fe73508 100644 --- a/cloud/amazon/s3_logging.py +++ b/cloud/amazon/s3_logging.py @@ -26,7 +26,6 @@ options: description: - "Name of the s3 bucket." required: true - default: null region: description: - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard." @@ -47,7 +46,7 @@ options: description: - "The prefix that should be prepended to the generated log files written to the target_bucket." required: false - default: no + default: "" extends_documentation_fragment: aws ''' @@ -141,7 +140,7 @@ def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( - name = dict(required=True, default=None), + name = dict(required=True), target_bucket = dict(required=False, default=None), target_prefix = dict(required=False, default=""), state = dict(required=False, default='present', choices=['present', 'absent']) From 6c9410dce9fecff12cbb48001f6db166e47d0599 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 19 Jul 2015 14:57:08 +0200 Subject: [PATCH 154/157] cloudstack: cs_portforward: fix public_end_port was used for private_end_port --- cloud/cloudstack/cs_portforward.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index df95bfd3ea6..960a8607065 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -261,7 +261,7 @@ class AnsibleCloudStackPortforwarding(AnsibleCloudStack): public_port = self.module.params.get('public_port') public_end_port = self.get_public_end_port() private_port = self.module.params.get('private_port') - private_end_port = self.get_public_end_port() + private_end_port = self.get_private_end_port() args = {} args['ipaddressid'] = self.get_ip_address(key='id') From 8e6e9c782bc4425404d3f0dcce8ea19219f03ed4 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 19 Jul 2015 15:09:49 +0200 Subject: [PATCH 155/157] cloudstack: use get_or_fallback() from cloudstack utils --- cloud/cloudstack/cs_firewall.py | 10 ++-------- cloud/cloudstack/cs_instance.py | 9 +-------- cloud/cloudstack/cs_network.py | 9 +-------- cloud/cloudstack/cs_portforward.py | 24 ++++++----------------- cloud/cloudstack/cs_project.py | 11 ++--------- cloud/cloudstack/cs_securitygroup_rule.py | 10 ++-------- 6 files changed, 14 insertions(+), 59 deletions(-) diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 97cf97e781e..27350eab91b 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -216,18 +216,12 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): 
self.firewall_rule = None - def get_end_port(self): - if self.module.params.get('end_port'): - return self.module.params.get('end_port') - return self.module.params.get('start_port') - - def get_firewall_rule(self): if not self.firewall_rule: cidr = self.module.params.get('cidr') protocol = self.module.params.get('protocol') start_port = self.module.params.get('start_port') - end_port = self.get_end_port() + end_port = self.get_or_fallback('end_port', 'start_port') icmp_code = self.module.params.get('icmp_code') icmp_type = self.module.params.get('icmp_type') fw_type = self.module.params.get('type') @@ -328,7 +322,7 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): args['cidrlist'] = self.module.params.get('cidr') args['protocol'] = self.module.params.get('protocol') args['startport'] = self.module.params.get('start_port') - args['endport'] = self.get_end_port() + args['endport'] = self.get_or_fallback('end_port', 'start_port') args['icmptype'] = self.module.params.get('icmp_type') args['icmpcode'] = self.module.params.get('icmp_code') diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 7c2c117604d..d8412879691 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -500,13 +500,6 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): return user_data - def get_display_name(self): - display_name = self.module.params.get('display_name') - if not display_name: - display_name = self.module.params.get('name') - return display_name - - def deploy_instance(self): self.result['changed'] = True @@ -555,7 +548,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args_instance_update = {} args_instance_update['id'] = instance['id'] args_instance_update['group'] = self.module.params.get('group') - args_instance_update['displayname'] = self.get_display_name() + args_instance_update['displayname'] = self.get_or_fallback('display_name', 'name') args_instance_update['userdata'] = self.get_user_data() args_instance_update['ostypeid'] = self.get_os_type(key='id') diff --git a/cloud/cloudstack/cs_network.py b/cloud/cloudstack/cs_network.py index 50dd2981e72..c4fd51b7a0b 100644 --- a/cloud/cloudstack/cs_network.py +++ b/cloud/cloudstack/cs_network.py @@ -335,13 +335,6 @@ class AnsibleCloudStackNetwork(AnsibleCloudStack): self.network = None - def get_or_fallback(self, key=None, fallback_key=None): - value = self.module.params.get(key) - if not value: - value = self.module.params.get(fallback_key) - return value - - def get_vpc(self, key=None): vpc = self.module.params.get('vpc') if not vpc: @@ -380,7 +373,7 @@ class AnsibleCloudStackNetwork(AnsibleCloudStack): def _get_args(self): args = {} args['name'] = self.module.params.get('name') - args['displaytext'] = self.get_or_fallback('displaytext','name') + args['displaytext'] = self.get_or_fallback('displaytext', 'name') args['networkdomain'] = self.module.params.get('network_domain') args['networkofferingid'] = self.get_network_offering(key='id') return args diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 960a8607065..d1b8db4d65a 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -217,18 +217,6 @@ class AnsibleCloudStackPortforwarding(AnsibleCloudStack): self.vm_default_nic = None - def get_public_end_port(self): - if not self.module.params.get('public_end_port'): - return self.module.params.get('public_port') - return self.module.params.get('public_end_port') - - - def get_private_end_port(self): - if not 
self.module.params.get('private_end_port'): - return self.module.params.get('private_port') - return self.module.params.get('private_end_port') - - def get_vm_guest_ip(self): vm_guest_ip = self.module.params.get('vm_guest_ip') default_nic = self.get_vm_default_nic() @@ -259,9 +247,9 @@ class AnsibleCloudStackPortforwarding(AnsibleCloudStack): if not self.portforwarding_rule: protocol = self.module.params.get('protocol') public_port = self.module.params.get('public_port') - public_end_port = self.get_public_end_port() + public_end_port = self.get_or_fallback('public_end_port', 'public_port') private_port = self.module.params.get('private_port') - private_end_port = self.get_private_end_port() + private_end_port = self.get_or_fallback('private_end_port', 'private_port') args = {} args['ipaddressid'] = self.get_ip_address(key='id') @@ -290,9 +278,9 @@ class AnsibleCloudStackPortforwarding(AnsibleCloudStack): args = {} args['protocol'] = self.module.params.get('protocol') args['publicport'] = self.module.params.get('public_port') - args['publicendport'] = self.get_public_end_port() + args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port') args['privateport'] = self.module.params.get('private_port') - args['privateendport'] = self.get_private_end_port() + args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port') args['openfirewall'] = self.module.params.get('open_firewall') args['vmguestip'] = self.get_vm_guest_ip() args['ipaddressid'] = self.get_ip_address(key='id') @@ -312,9 +300,9 @@ class AnsibleCloudStackPortforwarding(AnsibleCloudStack): args = {} args['protocol'] = self.module.params.get('protocol') args['publicport'] = self.module.params.get('public_port') - args['publicendport'] = self.get_public_end_port() + args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port') args['privateport'] = self.module.params.get('private_port') - args['privateendport'] = self.get_private_end_port() + args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port') args['openfirewall'] = self.module.params.get('open_firewall') args['vmguestip'] = self.get_vm_guest_ip() args['ipaddressid'] = self.get_ip_address(key='id') diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py index f09c42f5899..896232f3053 100644 --- a/cloud/cloudstack/cs_project.py +++ b/cloud/cloudstack/cs_project.py @@ -148,13 +148,6 @@ class AnsibleCloudStackProject(AnsibleCloudStack): self.project = None - def get_displaytext(self): - displaytext = self.module.params.get('displaytext') - if not displaytext: - displaytext = self.module.params.get('name') - return displaytext - - def get_project(self): if not self.project: project = self.module.params.get('name') @@ -184,7 +177,7 @@ class AnsibleCloudStackProject(AnsibleCloudStack): def update_project(self, project): args = {} args['id'] = project['id'] - args['displaytext'] = self.get_displaytext() + args['displaytext'] = self.get_or_fallback('displaytext', 'name') if self._has_changed(args, project): self.result['changed'] = True @@ -205,7 +198,7 @@ class AnsibleCloudStackProject(AnsibleCloudStack): args = {} args['name'] = self.module.params.get('name') - args['displaytext'] = self.get_displaytext() + args['displaytext'] = self.get_or_fallback('displaytext', 'name') args['account'] = self.get_account('name') args['domainid'] = self.get_domain('id') diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 0780e12d70d..65bd7fd5640 
100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -222,18 +222,12 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): and cidr == rule['cidr'] - def get_end_port(self): - if self.module.params.get('end_port'): - return self.module.params.get('end_port') - return self.module.params.get('start_port') - - def _get_rule(self, rules): user_security_group_name = self.module.params.get('user_security_group') cidr = self.module.params.get('cidr') protocol = self.module.params.get('protocol') start_port = self.module.params.get('start_port') - end_port = self.get_end_port() + end_port = self.get_or_fallback('end_port', 'start_port') icmp_code = self.module.params.get('icmp_code') icmp_type = self.module.params.get('icmp_type') @@ -291,7 +285,7 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): args['protocol'] = self.module.params.get('protocol') args['startport'] = self.module.params.get('start_port') - args['endport'] = self.get_end_port() + args['endport'] = self.get_or_fallback('end_port', 'start_port') args['icmptype'] = self.module.params.get('icmp_type') args['icmpcode'] = self.module.params.get('icmp_code') args['projectid'] = self.get_project('id') From 7d6738ab9dcf1d85f5a0ae95d304e431b64c0797 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 19 Jul 2015 15:10:36 +0200 Subject: [PATCH 156/157] cloudstack: cs_instance: fix display_name not used in deployment --- cloud/cloudstack/cs_instance.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index d8412879691..f8bef7c89e2 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -517,6 +517,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args['ipaddress'] = self.module.params.get('ip_address') args['ip6address'] = self.module.params.get('ip6_address') args['name'] = self.module.params.get('name') + args['displayname'] = self.get_or_fallback('display_name', 'name') args['group'] = self.module.params.get('group') args['keypair'] = self.module.params.get('ssh_key') args['size'] = self.module.params.get('disk_size') From 312b34ad81b6fa9325af969f12b1470d8df21e75 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 20 Jul 2015 15:33:35 +0200 Subject: [PATCH 157/157] cloudstack: new module cs_staticnat --- cloud/cloudstack/cs_staticnat.py | 316 +++++++++++++++++++++++++++++++ 1 file changed, 316 insertions(+) create mode 100644 cloud/cloudstack/cs_staticnat.py diff --git a/cloud/cloudstack/cs_staticnat.py b/cloud/cloudstack/cs_staticnat.py new file mode 100644 index 00000000000..5761a3990e9 --- /dev/null +++ b/cloud/cloudstack/cs_staticnat.py @@ -0,0 +1,316 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
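
The CloudStack cleanup above repeatedly swaps per-module helpers (get_end_port, get_display_name, get_displaytext, get_public_end_port, ...) for the shared get_or_fallback() in the cloudstack utils; its body is visible where it is removed from cs_network.py. Expressed as a plain function over a parameter dict, it amounts to the following (the sample parameters are made up for illustration):

    # Sketch of get_or_fallback(): return one parameter, falling back to a
    # second one when the first is empty or unset.
    def get_or_fallback(params, key, fallback_key):
        value = params.get(key)
        if not value:
            value = params.get(fallback_key)
        return value

    params = {"public_port": 80, "public_end_port": None}
    assert get_or_fallback(params, "public_end_port", "public_port") == 80
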
+ +DOCUMENTATION = ''' +--- +module: cs_staticnat +short_description: Manages static NATs on Apache CloudStack based clouds. +description: + - Create, update and remove static NATs. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + ip_address: + description: + - Public IP address the static NAT is assigned to. + required: true + vm: + description: + - Name of virtual machine which we make the static NAT for. + - Required if C(state=present). + required: false + default: null + vm_guest_ip: + description: + - VM guest NIC secondary IP address for the static NAT. + required: false + default: false + state: + description: + - State of the static NAT. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + domain: + description: + - Domain the static NAT is related to. + required: false + default: null + account: + description: + - Account the static NAT is related to. + required: false + default: null + project: + description: + - Name of the project the static NAT is related to. + required: false + default: null + zone: + description: + - Name of the zone in which the virtual machine is in. + - If not set, default zone is used. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# create a static NAT: 1.2.3.4 -> web01 +- local_action: + module: cs_staticnat + ip_address: 1.2.3.4 + vm: web01 + +# remove a static NAT +- local_action: + module: cs_staticnat + ip_address: 1.2.3.4 + state: absent +''' + +RETURN = ''' +--- +ip_address: + description: Public IP address. + returned: success + type: string + sample: 1.2.3.4 +vm_name: + description: Name of the virtual machine. + returned: success + type: string + sample: web-01 +vm_display_name: + description: Display name of the virtual machine. + returned: success + type: string + sample: web-01 +vm_guest_ip: + description: IP of the virtual machine. + returned: success + type: string + sample: 10.101.65.152 +zone: + description: Name of zone the static NAT is related to. + returned: success + type: string + sample: ch-gva-2 +project: + description: Name of project the static NAT is related to. + returned: success + type: string + sample: Production +account: + description: Account the static NAT is related to. + returned: success + type: string + sample: example account +domain: + description: Domain the static NAT is related to. 
+ returned: success + type: string + sample: example domain +''' + + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackStaticNat(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.vm_default_nic = None + + +# TODO: move it to cloudstack utils, also used in cs_portforward + def get_vm_guest_ip(self): + vm_guest_ip = self.module.params.get('vm_guest_ip') + default_nic = self.get_vm_default_nic() + + if not vm_guest_ip: + return default_nic['ipaddress'] + + for secondary_ip in default_nic['secondaryip']: + if vm_guest_ip == secondary_ip['ipaddress']: + return vm_guest_ip + self.module.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip) + + +# TODO: move it to cloudstack utils, also used in cs_portforward + def get_vm_default_nic(self): + if self.vm_default_nic: + return self.vm_default_nic + + nics = self.cs.listNics(virtualmachineid=self.get_vm(key='id')) + if nics: + for n in nics['nic']: + if n['isdefault']: + self.vm_default_nic = n + return self.vm_default_nic + self.module.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm')) + + + def create_static_nat(self, ip_address): + self.result['changed'] = True + args = {} + args['virtualmachineid'] = self.get_vm(key='id') + args['ipaddressid'] = ip_address['id'] + args['vmguestip'] = self.get_vm_guest_ip() + if not self.module.check_mode: + res = self.cs.enableStaticNat(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + # reset ip address and query new values + self.ip_address = None + ip_address = self.get_ip_address() + return ip_address + + + def update_static_nat(self, ip_address): + args = {} + args['virtualmachineid'] = self.get_vm(key='id') + args['ipaddressid'] = ip_address['id'] + args['vmguestip'] = self.get_vm_guest_ip() + + # make an alias, so we can use _has_changed() + ip_address['vmguestip'] = ip_address['vmipaddress'] + if self._has_changed(args, ip_address): + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.disableStaticNat(ipaddressid=ip_address['id']) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + res = self._poll_job(res, 'staticnat') + res = self.cs.enableStaticNat(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + # reset ip address and query new values + self.ip_address = None + ip_address = self.get_ip_address() + return ip_address + + + def present_static_nat(self): + ip_address = self.get_ip_address() + if not ip_address['isstaticnat']: + ip_address = self.create_static_nat(ip_address) + else: + ip_address = self.update_static_nat(ip_address) + return ip_address + + + def absent_static_nat(self): + ip_address = self.get_ip_address() + if ip_address['isstaticnat']: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.disableStaticNat(ipaddressid=ip_address['id']) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self._poll_job(res, 'staticnat') + return ip_address + + + def get_result(self, ip_address): + if ip_address: + if 'zonename' in ip_address: + self.result['zone'] = ip_address['zonename'] + if 'domain' in ip_address: + 
self.result['domain'] = ip_address['domain'] + if 'account' in ip_address: + self.result['account'] = ip_address['account'] + if 'project' in ip_address: + self.result['project'] = ip_address['project'] + if 'virtualmachinedisplayname' in ip_address: + self.result['vm_display_name'] = ip_address['virtualmachinedisplayname'] + if 'virtualmachinename' in ip_address: + self.result['vm'] = ip_address['virtualmachinename'] + if 'vmipaddress' in ip_address: + self.result['vm_guest_ip'] = ip_address['vmipaddress'] + if 'ipaddress' in ip_address: + self.result['ip_address'] = ip_address['ipaddress'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + ip_address = dict(required=True), + vm = dict(default=None), + vm_guest_ip = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_static_nat = AnsibleCloudStackStaticNat(module) + + state = module.params.get('state') + if state in ['absent']: + ip_address = acs_static_nat.absent_static_nat() + else: + ip_address = acs_static_nat.present_static_nat() + + result = acs_static_nat.get_result(ip_address) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main()
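
Underneath, cs_staticnat drives the CloudStack API through the cs client library required by these modules: each API verb becomes a method call, results come back as dicts, and errors surface as an "errortext" key, which is exactly what the enableStaticNat/disableStaticNat calls in the module check for. A hedged, standalone sketch of those raw calls; the endpoint, credentials and IDs are placeholders, not values from the patch:

    from cs import CloudStack

    api = CloudStack(endpoint="https://cloud.example.com/client/api",
                     key="API_KEY", secret="API_SECRET")

    # Point the public IP at the VM (static NAT), mirroring create_static_nat().
    res = api.enableStaticNat(ipaddressid="IP_ADDRESS_ID",
                              virtualmachineid="VM_ID")
    if "errortext" in res:
        raise RuntimeError(res["errortext"])

    # Tear it down again; this returns an async job that the module normally polls.
    api.disableStaticNat(ipaddressid="IP_ADDRESS_ID")
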