From a55035c55874ac66351672039e94dd44349d5c7b Mon Sep 17 00:00:00 2001 From: Franck Nijhof Date: Sun, 12 Oct 2014 00:20:00 +0200 Subject: [PATCH 001/720] Added module for managing Apple Mac OSX user defaults --- system/mac_defaults.py | 351 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 351 insertions(+) create mode 100644 system/mac_defaults.py diff --git a/system/mac_defaults.py b/system/mac_defaults.py new file mode 100644 index 00000000000..861bebb8033 --- /dev/null +++ b/system/mac_defaults.py @@ -0,0 +1,351 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, GeekChimp - Franck Nijhof +# +# Originally developed for Macable: https://github.com/GeekChimp/macable +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: mac_defaults +author: Franck Nijhof +short_description: mac_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible +description: + - mac_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible scripts. + Mac OS X applications and other programs use the defaults system to record user preferences and other + information that must be maintained when the applications aren't running (such as default font for new + documents, or the position of an Info panel). +version_added: 1.8 +options: + domain: + description: + - The domain is a domain name of the form com.companyname.appname. + required: false + default: NSGlobalDomain + key: + description: + - The key of the user preference + required: true + type: + description: + - The type of value to write. + required: false + default: string + choices: [ "array", "bool", "boolean", "date", "float", "int", "integer", "string" ] + array_add: + description: + - Add new elements to the array for a key which has an array as its value. + required: false + default: string + choices: [ "true", "false" ] + value: + description: + - The value to write. Only required when state = present. + required: false + default: null + state: + description: + - The state of the user defaults + required: false + default: present + choices: [ "present", "absent" ] +notes: + - Apple Mac caches defaults. You may need to logout and login to apply the changes. 
+''' + +EXAMPLES = ''' +- mac_defaults: domain=com.apple.Safari key=IncludeInternalDebugMenu type=bool value=true state=present +- mac_defaults: domain=NSGlobalDomain key=AppleMeasurementUnits type=string value=Centimeters state=present +- mac_defaults: key=AppleMeasurementUnits type=string value=Centimeters +- mac_defaults: + key: AppleLanguages + type: array + value: ["en", "nl"] +- mac_defaults: domain=com.geekchimp.macable key=ExampleKeyToRemove state=absent +''' + +from datetime import datetime + +# exceptions --------------------------------------------------------------- {{{ +class MacDefaultsException(Exception): + pass +# /exceptions -------------------------------------------------------------- }}} + +# class MacDefaults -------------------------------------------------------- {{{ +class MacDefaults(object): + + """ Class to manage Mac OS user defaults """ + + # init ---------------------------------------------------------------- {{{ + """ Initialize this module. Finds 'defaults' executable and preps the parameters """ + def __init__(self, **kwargs): + + # Initial var for storing current defaults value + self.current_value = None + + # Just set all given parameters + for key, val in kwargs.iteritems(): + setattr(self, key, val) + + # Try to find the defaults executable + self.executable = self.module.get_bin_path( + 'defaults', + required=False, + opt_dirs=self.path.split(':'), + ) + + if not self.executable: + raise MacDefaultsException("Unable to locate defaults executable.") + + # When state is present, we require a parameter + if self.state == "present" and self.value is None: + raise MacDefaultsException("Missing value parameter") + + # Ensure the value is the correct type + self.value = self._convert_type(self.type, self.value) + + # /init --------------------------------------------------------------- }}} + + # tools --------------------------------------------------------------- {{{ + """ Converts value to given type """ + def _convert_type(self, type, value): + + if type == "string": + return str(value) + elif type in ["bool", "boolean"]: + if value in [True, 1, "true", "1", "yes"]: + return True + elif value in [False, 0, "false", "0", "no"]: + return False + raise MacDefaultsException("Invalid boolean value: {0}".format(repr(value))) + elif type == "date": + try: + return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S") + except ValueError: + raise MacDefaultsException( + "Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value)) + ) + elif type in ["int", "integer"]: + if not str(value).isdigit(): + raise MacDefaultsException("Invalid integer value: {0}".format(repr(value))) + return int(value) + elif type == "float": + try: + value = float(value) + except ValueError: + raise MacDefaultsException("Invalid float value: {0}".format(repr(value))) + return value + elif type == "array": + if not isinstance(value, list): + raise MacDefaultsException("Invalid value. Expected value to be an array") + return value + + raise MacDefaultsException('Type is not supported: {0}'.format(type)) + + """ Converts array output from defaults to an list """ + @staticmethod + def _convert_defaults_str_to_list(value): + + # Split output of defaults. 
Every line contains a value + value = value.splitlines() + + # Remove first and last item, those are not actual values + value.pop(0) + value.pop(-1) + + # Remove extra spaces and comma (,) at the end of values + value = [re.sub(',$', '', x.strip(' ')) for x in value] + + return value + # /tools -------------------------------------------------------------- }}} + + # commands ------------------------------------------------------------ {{{ + """ Reads value of this domain & key from defaults """ + def read(self): + # First try to find out the type + rc, out, err = self.module.run_command([self.executable, "read-type", self.domain, self.key]) + + # If RC is 1, the key does not exists + if rc == 1: + return None + + # If the RC is not 0, then terrible happened! Ooooh nooo! + if rc != 0: + raise MacDefaultsException("An error occurred while reading key type from defaults: " + out) + + # Ok, lets parse the type from output + type = out.strip().replace('Type is ', '') + + # Now get the current value + rc, out, err = self.module.run_command([self.executable, "read", self.domain, self.key]) + + # Strip output + # out = out.strip() + + # An non zero RC at this point is kinda strange... + if rc != 0: + raise MacDefaultsException("An error occurred while reading key value from defaults: " + out) + + # Convert string to list when type is array + if type == "array": + out = self._convert_defaults_str_to_list(out) + + # Store the current_value + self.current_value = self._convert_type(type, out) + + """ Writes value to this domain & key to defaults """ + def write(self): + + # We need to convert some values so the defaults commandline understands it + if type(self.value) is bool: + value = "TRUE" if self.value else "FALSE" + elif type(self.value) is int or type(self.value) is float: + value = str(self.value) + elif self.array_add and self.current_value is not None: + value = list(set(self.value) - set(self.current_value)) + elif isinstance(self.value, datetime): + value = self.value.strftime('%Y-%m-%d %H:%M:%S') + else: + value = self.value + + # When the type is array and array_add is enabled, morph the type :) + if self.type == "array" and self.array_add: + self.type = "array-add" + + # All values should be a list, for easy passing it to the command + if not isinstance(value, list): + value = [value] + + rc, out, err = self.module.run_command([self.executable, 'write', self.domain, self.key, '-' + self.type] + value) + + if rc != 0: + raise MacDefaultsException('An error occurred while writing value to defaults: ' + out) + + """ Deletes defaults key from domain """ + def delete(self): + rc, out, err = self.module.run_command([self.executable, 'delete', self.domain, self.key]) + if rc != 0: + raise MacDefaultsException("An error occurred while deleting key from defaults: " + out) + + # /commands ----------------------------------------------------------- }}} + + # run ----------------------------------------------------------------- {{{ + """ Does the magic! :) """ + def run(self): + + # Get the current value from defaults + self.read() + + # Handle absent state + if self.state == "absent": + print "Absent state detected!" + if self.current_value is None: + return False + self.delete() + return True + + # There is a type mismatch! Given type does not match the type in defaults + if self.current_value is not None and type(self.current_value) is not type(self.value): + raise MacDefaultsException("Type mismatch. 
Type in defaults: " + type(self.current_value).__name__) + + # Current value matches the given value. Nothing need to be done. Arrays need extra care + if self.type == "array" and self.current_value is not None and not self.array_add and \ + set(self.current_value) == set(self.value): + return False + elif self.type == "array" and self.current_value is not None and self.array_add and \ + len(list(set(self.value) - set(self.current_value))) == 0: + return False + elif self.current_value == self.value: + return False + + # Change/Create/Set given key/value for domain in defaults + self.write() + return True + + # /run ---------------------------------------------------------------- }}} + +# /class MacDefaults ------------------------------------------------------ }}} + + +# main -------------------------------------------------------------------- {{{ +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict( + default="NSGlobalDomain", + required=False, + ), + key=dict( + default=None, + ), + type=dict( + default="string", + required=False, + choices=[ + "array", + "bool", + "boolean", + "date", + "float", + "int", + "integer", + "string", + ], + ), + array_add=dict( + default=False, + required=False, + choices=BOOLEANS, + ), + value=dict( + default=None, + required=False, + ), + state=dict( + default="present", + required=False, + choices=[ + "absent", "present" + ], + ), + path=dict( + default="/usr/bin:/usr/local/bin", + required=False, + ) + ), + supports_check_mode=True, + ) + + domain = module.params['domain'] + key = module.params['key'] + type = module.params['type'] + array_add = module.params['array_add'] + value = module.params['value'] + state = module.params['state'] + path = module.params['path'] + + try: + defaults = MacDefaults(module=module, domain=domain, key=key, type=type, + array_add=array_add, value=value, state=state, path=path) + changed = defaults.run() + module.exit_json(changed=changed) + except MacDefaultsException as e: + module.fail_json(msg=e.message) + +# /main ------------------------------------------------------------------- }}} + +from ansible.module_utils.basic import * +main() From 2c43cdb12378ba7c01ba2bb785f05f5a1c54d2cc Mon Sep 17 00:00:00 2001 From: Franck Nijhof Date: Sun, 12 Oct 2014 14:41:57 +0200 Subject: [PATCH 002/720] Renamed module from mac_defaults to osx_defaults so the naming is more up to par with existing modules (e.g. 
osx_say) --- system/{mac_defaults.py => osx_defaults.py} | 52 ++++++++++----------- 1 file changed, 25 insertions(+), 27 deletions(-) rename system/{mac_defaults.py => osx_defaults.py} (88%) diff --git a/system/mac_defaults.py b/system/osx_defaults.py similarity index 88% rename from system/mac_defaults.py rename to system/osx_defaults.py index 861bebb8033..8baed17f2eb 100644 --- a/system/mac_defaults.py +++ b/system/osx_defaults.py @@ -3,8 +3,6 @@ # (c) 2014, GeekChimp - Franck Nijhof # -# Originally developed for Macable: https://github.com/GeekChimp/macable -# # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or @@ -20,11 +18,11 @@ DOCUMENTATION = ''' --- -module: mac_defaults +module: osx_defaults author: Franck Nijhof -short_description: mac_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible +short_description: osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible description: - - mac_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible scripts. + - osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible scripts. Mac OS X applications and other programs use the defaults system to record user preferences and other information that must be maintained when the applications aren't running (such as default font for new documents, or the position of an Info panel). @@ -67,25 +65,25 @@ notes: ''' EXAMPLES = ''' -- mac_defaults: domain=com.apple.Safari key=IncludeInternalDebugMenu type=bool value=true state=present -- mac_defaults: domain=NSGlobalDomain key=AppleMeasurementUnits type=string value=Centimeters state=present -- mac_defaults: key=AppleMeasurementUnits type=string value=Centimeters -- mac_defaults: +- osx_defaults: domain=com.apple.Safari key=IncludeInternalDebugMenu type=bool value=true state=present +- osx_defaults: domain=NSGlobalDomain key=AppleMeasurementUnits type=string value=Centimeters state=present +- osx_defaults: key=AppleMeasurementUnits type=string value=Centimeters +- osx_defaults: key: AppleLanguages type: array value: ["en", "nl"] -- mac_defaults: domain=com.geekchimp.macable key=ExampleKeyToRemove state=absent +- osx_defaults: domain=com.geekchimp.macable key=ExampleKeyToRemove state=absent ''' from datetime import datetime # exceptions --------------------------------------------------------------- {{{ -class MacDefaultsException(Exception): +class OSXDefaultsException(Exception): pass # /exceptions -------------------------------------------------------------- }}} # class MacDefaults -------------------------------------------------------- {{{ -class MacDefaults(object): +class OSXDefaults(object): """ Class to manage Mac OS user defaults """ @@ -108,11 +106,11 @@ class MacDefaults(object): ) if not self.executable: - raise MacDefaultsException("Unable to locate defaults executable.") + raise OSXDefaultsException("Unable to locate defaults executable.") # When state is present, we require a parameter if self.state == "present" and self.value is None: - raise MacDefaultsException("Missing value parameter") + raise OSXDefaultsException("Missing value parameter") # Ensure the value is the correct type self.value = self._convert_type(self.type, self.value) @@ -130,30 +128,30 @@ class MacDefaults(object): return True elif value in [False, 0, "false", "0", "no"]: return False - raise 
MacDefaultsException("Invalid boolean value: {0}".format(repr(value))) + raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value))) elif type == "date": try: return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S") except ValueError: - raise MacDefaultsException( + raise OSXDefaultsException( "Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value)) ) elif type in ["int", "integer"]: if not str(value).isdigit(): - raise MacDefaultsException("Invalid integer value: {0}".format(repr(value))) + raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value))) return int(value) elif type == "float": try: value = float(value) except ValueError: - raise MacDefaultsException("Invalid float value: {0}".format(repr(value))) + raise OSXDefaultsException("Invalid float value: {0}".format(repr(value))) return value elif type == "array": if not isinstance(value, list): - raise MacDefaultsException("Invalid value. Expected value to be an array") + raise OSXDefaultsException("Invalid value. Expected value to be an array") return value - raise MacDefaultsException('Type is not supported: {0}'.format(type)) + raise OSXDefaultsException('Type is not supported: {0}'.format(type)) """ Converts array output from defaults to an list """ @staticmethod @@ -184,7 +182,7 @@ class MacDefaults(object): # If the RC is not 0, then terrible happened! Ooooh nooo! if rc != 0: - raise MacDefaultsException("An error occurred while reading key type from defaults: " + out) + raise OSXDefaultsException("An error occurred while reading key type from defaults: " + out) # Ok, lets parse the type from output type = out.strip().replace('Type is ', '') @@ -197,7 +195,7 @@ class MacDefaults(object): # An non zero RC at this point is kinda strange... if rc != 0: - raise MacDefaultsException("An error occurred while reading key value from defaults: " + out) + raise OSXDefaultsException("An error occurred while reading key value from defaults: " + out) # Convert string to list when type is array if type == "array": @@ -232,13 +230,13 @@ class MacDefaults(object): rc, out, err = self.module.run_command([self.executable, 'write', self.domain, self.key, '-' + self.type] + value) if rc != 0: - raise MacDefaultsException('An error occurred while writing value to defaults: ' + out) + raise OSXDefaultsException('An error occurred while writing value to defaults: ' + out) """ Deletes defaults key from domain """ def delete(self): rc, out, err = self.module.run_command([self.executable, 'delete', self.domain, self.key]) if rc != 0: - raise MacDefaultsException("An error occurred while deleting key from defaults: " + out) + raise OSXDefaultsException("An error occurred while deleting key from defaults: " + out) # /commands ----------------------------------------------------------- }}} @@ -259,7 +257,7 @@ class MacDefaults(object): # There is a type mismatch! Given type does not match the type in defaults if self.current_value is not None and type(self.current_value) is not type(self.value): - raise MacDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__) + raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__) # Current value matches the given value. Nothing need to be done. 
Arrays need extra care if self.type == "array" and self.current_value is not None and not self.array_add and \ @@ -338,11 +336,11 @@ def main(): path = module.params['path'] try: - defaults = MacDefaults(module=module, domain=domain, key=key, type=type, + defaults = OSXDefaults(module=module, domain=domain, key=key, type=type, array_add=array_add, value=value, state=state, path=path) changed = defaults.run() module.exit_json(changed=changed) - except MacDefaultsException as e: + except OSXDefaultsException as e: module.fail_json(msg=e.message) # /main ------------------------------------------------------------------- }}} From 130bd670d82cc55fa321021e819838e07ff10c08 Mon Sep 17 00:00:00 2001 From: Franck Nijhof Date: Mon, 13 Oct 2014 07:34:24 +0200 Subject: [PATCH 003/720] Small fix for boolean when boolean type was set via a variable (somehow changes the behaviour of Ansible because of YAML as it seems. Booleans then become represented as a string). --- system/osx_defaults.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/system/osx_defaults.py b/system/osx_defaults.py index 8baed17f2eb..0dd7ca8ff6b 100644 --- a/system/osx_defaults.py +++ b/system/osx_defaults.py @@ -124,9 +124,9 @@ class OSXDefaults(object): if type == "string": return str(value) elif type in ["bool", "boolean"]: - if value in [True, 1, "true", "1", "yes"]: + if value.lower() in [True, 1, "true", "1", "yes"]: return True - elif value in [False, 0, "false", "0", "no"]: + elif value.lower() in [False, 0, "false", "0", "no"]: return False raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value))) elif type == "date": @@ -191,7 +191,7 @@ class OSXDefaults(object): rc, out, err = self.module.run_command([self.executable, "read", self.domain, self.key]) # Strip output - # out = out.strip() + out = out.strip() # An non zero RC at this point is kinda strange... if rc != 0: From 7c5d9845574edc52df977e7d1628e0c82529ae18 Mon Sep 17 00:00:00 2001 From: Jim Richardson Date: Sun, 19 Oct 2014 14:50:55 -0700 Subject: [PATCH 004/720] initial stab at pushover notification module --- notification/pushover | 127 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 notification/pushover diff --git a/notification/pushover b/notification/pushover new file mode 100644 index 00000000000..ba9bafc9792 --- /dev/null +++ b/notification/pushover @@ -0,0 +1,127 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + + +# vim: set expandtab: +### +# Copyright (c) 2012, Jim Richardson +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions, and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions, and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the author of this software nor the name of +# contributors to this software may be used to endorse or promote products +# derived from this software without specific prior written consent. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +### + + +''' +License: GPL V2 See LICENSE file +Author: Jim Richardson +email: weaselkeeper@gmail.com + +''' + +DOCUMENTATION = ''' +--- +module: pushover +version_added: "1.8" +short_description: Send notifications via u(https://pushover.net) +description: + - Send notifications via pushover, to subscriber list of devices, and email + addresses. Requires pushover app on devices. +notes: + - You will require a pushover.net account to use this module. But no account + is required to receive messages. +options: + msg: + description: + What message you wish to send. + required: true + app_token: + description: + Pushover issued token identifying your pushover app. + required: true + user_key: + description: + Pushover issued authentication key for your user. + required: true + pri: + description: Message priority (see u(https://pushover.net) for details.) + required: false + +author: Jim Richardson +''' + +EXAMPLES = ''' +- local_action: pushover msg="{{inventory_hostname}} has exploded in flames, + It is now time to panic" app_token=wxfdksl user_key=baa5fe97f2c5ab3ca8f0bb59 +''' + +import urllib +import httplib + + +class pushover(object): + ''' Instantiates a pushover object, use it to send notifications ''' + + def __init__(self): + self.host, self.port = 'api.pushover.net', 443 + + def run(self): + ''' Do, whatever it is, we do. ''' + # parse config + conn = httplib.HTTPSConnection(self.host, self.port) + conn.request("POST", "/1/messages.json", + urllib.urlencode(self.options), + {"Content-type": "application/x-www-form-urlencoded"}) + conn.getresponse() + return + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + msg=dict(required=True), + app_token=dict(required=True), + user_key=dict(required=True), + pri=dict(required=False, default=0), + ), + ) + + msg_object = pushover() + msg_object.options = {} + msg_object.options['user'] = module.params['user_key'] + msg_object.options['token'] = module.params['app_token'] + msg_object.options['priority'] = module.params['pri'] + msg_object.options['message'] = module.params['msg'] + try: + msg_object.run() + except: + module.fail_json(msg='Wibble') + + module.exit_json(msg="OK", changed=False) + +# import module snippets +from ansible.module_utils.basic import * +main() From 64242fb1a6cfa9a06e8726ef5bb864ba49593a4a Mon Sep 17 00:00:00 2001 From: Jim Richardson Date: Sun, 19 Oct 2014 14:55:46 -0700 Subject: [PATCH 005/720] slight tweak to preamble to bring into common with other ansible modules --- notification/pushover | 37 ++++++++++++------------------------- 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/notification/pushover b/notification/pushover index ba9bafc9792..a05b64d8089 100644 --- a/notification/pushover +++ b/notification/pushover @@ -1,35 +1,22 @@ #!/usr/bin/python # -*- coding: utf-8 -*- - - -# vim: set expandtab: -### # Copyright (c) 2012, Jim Richardson # All rights reserved. 
# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. # -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions, and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions, and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the author of this software nor the name of -# contributors to this software may be used to endorse or promote products -# derived from this software without specific prior written consent. +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. # -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
### From f0d81a5290b125bb794b3737a7323eb83b092246 Mon Sep 17 00:00:00 2001 From: Jim Richardson Date: Sun, 19 Oct 2014 14:57:10 -0700 Subject: [PATCH 006/720] remove extraneous info from preamble --- notification/pushover | 8 -------- 1 file changed, 8 deletions(-) diff --git a/notification/pushover b/notification/pushover index a05b64d8089..6b7c32d758d 100644 --- a/notification/pushover +++ b/notification/pushover @@ -20,14 +20,6 @@ ### - -''' -License: GPL V2 See LICENSE file -Author: Jim Richardson -email: weaselkeeper@gmail.com - -''' - DOCUMENTATION = ''' --- module: pushover From b0ec83ef08ddf845567e3b3f6eb65ef6b825e4f2 Mon Sep 17 00:00:00 2001 From: Jim Richardson Date: Sun, 19 Oct 2014 21:19:25 -0700 Subject: [PATCH 007/720] clarification of error and success messages --- notification/pushover | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/notification/pushover b/notification/pushover index 6b7c32d758d..7fd66333f54 100644 --- a/notification/pushover +++ b/notification/pushover @@ -97,9 +97,9 @@ def main(): try: msg_object.run() except: - module.fail_json(msg='Wibble') + module.fail_json(msg='Unable to send msg via pushover') - module.exit_json(msg="OK", changed=False) + module.exit_json(msg=msg, changed=False) # import module snippets from ansible.module_utils.basic import * From 344713365f89bdcaa328b313949f213a6190a55c Mon Sep 17 00:00:00 2001 From: Chris Schmidt Date: Thu, 23 Oct 2014 21:02:20 -0600 Subject: [PATCH 008/720] Added the download_artifact module The download_artifact module resolves a maven dependency coordinate and downloads the artifact to the target path --- packaging/download_artifact.py | 367 +++++++++++++++++++++++++++++++++ 1 file changed, 367 insertions(+) create mode 100644 packaging/download_artifact.py diff --git a/packaging/download_artifact.py b/packaging/download_artifact.py new file mode 100644 index 00000000000..16855c142b3 --- /dev/null +++ b/packaging/download_artifact.py @@ -0,0 +1,367 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Chris Schmidt +# +# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact +# as a reference and starting point. +# +# + +__author__ = 'cschmidt' + +from lxml import etree +from urllib2 import Request, urlopen, URLError, HTTPError +import os +import hashlib +import sys +import base64 + +DOCUMENTATION = ''' +--- +module: download_artifact +short_description: Downloads an Artifact from a Maven Repository +version_added: "historical" +description: + - Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve + - snapshots or release versions of the artifact and will resolve the latest available version if one is not + - available. 
+author: Chris Schmidt +requirements: + - python libxml + - python urllib2 +options: + group_id: + description: The Maven groupId coordinate + required: true + default: null + version_added: 0.0.1 + artifact_id: + description: The maven artifactId coordinate + required: true + default: null + version_added: 0.0.1 + version: + description: The maven version coordinate + required: false + default: latest + version_added: 0.0.1 + classifier: + description: The maven classifier coordinate + required: false + default: null + version_added: 0.0.1 + extension: + description: The maven type/extension coordinate + required: false + default: jar + version_added: 0.0.1 + repository_url: + description: The URL of the Maven Repository to download from + required: false + default: http://repo1.maven.org/maven2 + version_added: 0.0.1 + username: + description: The username to authenticate as to the Maven Repository + required: false + default: null + version_added: 0.0.1 + password: + description: The passwor to authenticate with to the Maven Repository + required: false + default: null + version_added: 0.0.1 + target: + description: The path where the artifact should be written to + required: true + default: false + version_added: 0.0.1 + state: + description: The desired state of the artifact + required: true + default: present + choices: [present,absent] + version_added: 0.0.1 +''' + +EXAMPLES = ''' +# Download the latest version of the commons-collections artifact from Maven Central +- download_artifact: group_id=org.apache.commons artifact_id=commons-collections target=/tmp/commons-collections-latest.jar + +# Download Apache Commons-Collections 3.2 from Maven Central +- download_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 target=/tmp/commons-collections-3.2.jar + +# Download an artifact from a private repository requiring authentication +- download_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass target=/tmp/library-name-latest.jar + +# Download a WAR File to the Tomcat webapps directory to be deployed +- download_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven target=/var/lib/tomcat7/webapps/web-app.war +''' + + +class Artifact(object): + def __init__(self, group_id, artifact_id, version, classifier=None, extension=jar): + if not group_id: + raise ValueError("group_id must be set") + if not artifact_id: + raise ValueError("artifact_id must be set") + + self.group_id = group_id + self.artifact_id = artifact_id + self.version = version + self.classifier = classifier + + if not extension: + self.extension = "jar" + else: + self.extension = extension + + def is_snapshot(self): + return self.version and self.version.endswith("SNAPSHOT") + + def path(self, with_version=True): + base = self.group_id.replace(".", "/") + "/" + self.artifact_id + if with_version and self.version: + return base + "/" + self.version + else: + return base + + def _generate_filename(self): + if not self.classifier: + return self.artifact_id + "." + self.extension + else: + return self.artifact_id + "-" + self.classifier + "." 
+ self.extension + + def get_filename(self, filename=None): + if not filename: + filename = self._generate_filename() + elif os.path.isdir(filename): + filename = os.path.join(filename, self._generate_filename()) + return filename + + def __str__(self): + if self.classifier: + return "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version) + elif self.extension != "jar": + return "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version) + else: + return "%s:%s:%s" % (self.group_id, self.artifact_id, self.version) + + @staticmethod + def parse(input): + parts = input.split(":") + if len(parts) >= 3: + g = parts[0] + a = parts[1] + v = parts[len(parts) - 1] + t = None + c = None + if len(parts) == 4: + t = parts[2] + if len(parts) == 5: + t = parts[2] + c = parts[3] + return Artifact(g, a, v, c, t) + else: + return None + + +class MavenDownloader: + def __init__(self, base="http://repo1.maven.org/maven2", username=None, password=None): + if base.endswith("/"): + base = base.rstrip("/") + self.base = base + self.user_agent = "Maven Artifact Downloader/1.0" + self.username = username + self.password = password + + def _find_latest_version_available(self, artifact): + path = "/%s/maven-metadata.xml" % (artifact.path(False)) + xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r)) + v = xml.xpath("/metadata/versioning/versions/version[last()]/text()") + if v: + return v[0] + + def find_uri_for_artifact(self, artifact): + if artifact.is_snapshot(): + path = "/%s/maven-metadata.xml" % (artifact.path()) + xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r)) + basexpath = "/metadata/versioning/" + p = xml.xpath(basexpath + "/snapshotVersions/snapshotVersion") + if p: + return self._find_matching_artifact(p, artifact) + else: + return self._uri_for_artifact(artifact) + + def _find_matching_artifact(self, elems, artifact): + filtered = filter(lambda e: e.xpath("extension/text() = '%s'" % artifact.extension), elems) + if artifact.classifier: + filtered = filter(lambda e: e.xpath("classifier/text() = '%s'" % artifact.classifier), elems) + + if len(filtered) > 1: + print( + "There was more than one match. Selecting the first one. Try adding a classifier to get a better match.") + elif not len(filtered): + print("There were no matches.") + return None + + elem = filtered[0] + value = elem.xpath("value/text()") + return self._uri_for_artifact(artifact, value[0]) + + def _uri_for_artifact(self, artifact, version=None): + if artifact.is_snapshot() and not version: + raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact)) + elif not artifact.is_snapshot(): + version = artifact.version + if artifact.classifier: + return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension + + return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "." 
+ artifact.extension + + def _request(self, url, failmsg, f): + if not self.username: + headers = {"User-Agent": self.user_agent} + else: + headers = { + "User-Agent": self.user_agent, + "Authorization": "Basic " + base64.b64encode(self.username + ":" + self.password) + } + req = Request(url, None, headers) + try: + response = urlopen(req) + except HTTPError, e: + raise ValueError(failmsg + " because of " + str(e) + "for URL " + url) + except URLError, e: + raise ValueError(failmsg + " because of " + str(e) + "for URL " + url) + else: + return f(response) + + + def download(self, artifact, filename=None): + filename = artifact.get_filename(filename) + if not artifact.version: + artifact = Artifact(artifact.group_id, artifact.artifact_id, self._find_latest_version_available(artifact), + artifact.classifier, artifact.extension) + + url = self.find_uri_for_artifact(artifact) + if not self.verify_md5(filename, url + ".md5"): + response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r) + if response: + with open(filename, 'w') as f: + # f.write(response.read()) + self._write_chunks(response, f, report_hook=self.chunk_report) + return True + else: + return False + else: + return True + + def chunk_report(self, bytes_so_far, chunk_size, total_size): + percent = float(bytes_so_far) / total_size + percent = round(percent * 100, 2) + sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" % + (bytes_so_far, total_size, percent)) + + if bytes_so_far >= total_size: + sys.stdout.write('\n') + + def _write_chunks(self, response, file, chunk_size=8192, report_hook=None): + total_size = response.info().getheader('Content-Length').strip() + total_size = int(total_size) + bytes_so_far = 0 + + while 1: + chunk = response.read(chunk_size) + bytes_so_far += len(chunk) + + if not chunk: + break + + file.write(chunk) + if report_hook: + report_hook(bytes_so_far, chunk_size, total_size) + + return bytes_so_far + + def verify_md5(self, file, remote_md5): + if not os.path.exists(file): + return False + else: + local_md5 = self._local_md5(file) + remote = self._request(remote_md5, "Failed to download MD5", lambda r: r.read()) + return local_md5 == remote + + def _local_md5(self, file): + md5 = hashlib.md5() + with open(file, 'rb') as f: + for chunk in iter(lambda: f.read(8192), ''): + md5.update(chunk) + return md5.hexdigest() + + +def main(): + module = AnsibleModule( + argument_spec = dict( + group_id = dict(default=None), + artifact_id = dict(default=None), + version = dict(default=None), + classifier = dict(default=None), + extension = dict(default=None), + repository_url = dict(default=None), + username = dict(default=None), + password = dict(default=None), + state = dict(default="latest", choices=["present","absent"]), + target = dict(default=None), + ) + ) + + group_id = module.params["group_id"] + artifact_id = module.params["artifact_id"] + version = module.params["version"] + classifier = module.params["classifier"] + extension = module.params["extension"] + repository_url = module.params["repository_url"] + repository_username = module.params["username"] + repository_password = module.params["password"] + state = module.params["state"] + target = module.params["target"] + + if not repository_url: + repository_url = "http://repo1.maven.org/maven2" + + downloader = MavenDownloader(repository_url, repository_username, repository_password) + + try: + artifact = Artifact(group_id, artifact_id, version, classifier, extension) + except ValueError as e: + 
module.fail_json(msg=e.args[0]) + + prev_state = "absent" + if os.path.lexists(target): + prev_state = "present" + else: + path = os.path.dirname(target) + if not os.path.exists(path): + os.makedirs(path) + + if prev_state == "present": + if state == "latest": + artifact_uri = downloader.find_uri_for_artifact(artifact) + if downloader.verify_md5(target, artifact_uri + ".md5"): + module.exit_json(target=target, state=state, changed=False) + else: + module.exit_json(target=target, state=state, changed=False) + try: + if downloader.download(artifact, target): + module.exit_json(state=state, target=target, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True) + else: + module.fail_json(msg="Unable to download the artifact") + except ValueError as e: + module.fail_json(msg=e.args[0]) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() \ No newline at end of file From 7e26d715d3f79d26542ff146992a8e4d5af16191 Mon Sep 17 00:00:00 2001 From: Chris Schmidt Date: Thu, 23 Oct 2014 21:06:14 -0600 Subject: [PATCH 009/720] Updated w/ license MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added license Added TODO for a “latest” state Removed pending “latest” state work --- packaging/download_artifact.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) mode change 100644 => 100755 packaging/download_artifact.py diff --git a/packaging/download_artifact.py b/packaging/download_artifact.py old mode 100644 new mode 100755 index 16855c142b3..741646dcdf3 --- a/packaging/download_artifact.py +++ b/packaging/download_artifact.py @@ -6,7 +6,18 @@ # Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact # as a reference and starting point. # +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. # +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
__author__ = 'cschmidt' @@ -98,7 +109,6 @@ EXAMPLES = ''' - download_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven target=/var/lib/tomcat7/webapps/web-app.war ''' - class Artifact(object): def __init__(self, group_id, artifact_id, version, classifier=None, extension=jar): if not group_id: @@ -311,7 +321,7 @@ def main(): repository_url = dict(default=None), username = dict(default=None), password = dict(default=None), - state = dict(default="latest", choices=["present","absent"]), + state = dict(default="latest", choices=["present","absent"]), # TODO - Implement a "latest" state target = dict(default=None), ) ) @@ -346,12 +356,8 @@ def main(): os.makedirs(path) if prev_state == "present": - if state == "latest": - artifact_uri = downloader.find_uri_for_artifact(artifact) - if downloader.verify_md5(target, artifact_uri + ".md5"): - module.exit_json(target=target, state=state, changed=False) - else: - module.exit_json(target=target, state=state, changed=False) + module.exit_json(target=target, state=state, changed=False) + try: if downloader.download(artifact, target): module.exit_json(state=state, target=target, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True) From 91d0b2c00ff96b1190839eab95bc0b726b2e594d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20Gr=C3=B6ning?= Date: Fri, 7 Nov 2014 14:14:12 +0100 Subject: [PATCH 010/720] add function for servicegrup downtimes --- monitoring/nagios.py | 53 +++++++++++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 16 deletions(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 9219766b86a..3d46d74ef48 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -169,6 +169,7 @@ def main(): 'silence_nagios', 'unsilence_nagios', 'command', + 'servicegroup_downtime' ] module = AnsibleModule( @@ -176,6 +177,7 @@ def main(): action=dict(required=True, default=None, choices=ACTION_CHOICES), author=dict(default='Ansible'), host=dict(required=False, default=None), + servicegroup=dict(required=False, default=None), minutes=dict(default=30), cmdfile=dict(default=which_cmdfile()), services=dict(default=None, aliases=['service']), @@ -185,11 +187,12 @@ def main(): action = module.params['action'] host = module.params['host'] + servicegroup = module.params['servicegroup'] minutes = module.params['minutes'] services = module.params['services'] cmdfile = module.params['cmdfile'] command = module.params['command'] - + ################################################################## # Required args per action: # downtime = (minutes, service, host) @@ -201,7 +204,7 @@ def main(): # 'minutes' and 'service' manually. 
################################################################## - if action not in ['command', 'silence_nagios', 'unsilence_nagios']: + if action not in ['command', 'silence_nagios', 'unsilence_nagios', 'servicegroup_downtime']: if not host: module.fail_json(msg='no host specified for action requiring one') ###################################################################### @@ -217,6 +220,20 @@ def main(): except Exception: module.fail_json(msg='invalid entry for minutes') + ###################################################################### + + if action == 'servicegroup_downtime': + # Make sure there's an actual service selected + if not servicegroup: + module.fail_json(msg='no servicegroup selected to set downtime for') + # Make sure minutes is a number + try: + m = int(minutes) + if not isinstance(m, types.IntType): + module.fail_json(msg='minutes must be a number') + except Exception: + module.fail_json(msg='invalid entry for minutes') + ################################################################## if action in ['enable_alerts', 'disable_alerts']: if not services: @@ -259,6 +276,7 @@ class Nagios(object): self.action = kwargs['action'] self.author = kwargs['author'] self.host = kwargs['host'] + self.service_group = kwargs['servicegroup'] self.minutes = int(kwargs['minutes']) self.cmdfile = kwargs['cmdfile'] self.command = kwargs['command'] @@ -356,7 +374,7 @@ class Nagios(object): notif_str = "[%s] %s" % (entry_time, cmd) if host is not None: notif_str += ";%s" % host - + if svc is not None: notif_str += ";%s" % svc @@ -784,42 +802,42 @@ class Nagios(object): return return_str_list else: return "Fail: could not write to the command file" - + def silence_nagios(self): """ This command is used to disable notifications for all hosts and services in nagios. - + This is a 'SHUT UP, NAGIOS' command """ cmd = 'DISABLE_NOTIFICATIONS' self._write_command(self._fmt_notif_str(cmd)) - + def unsilence_nagios(self): """ This command is used to enable notifications for all hosts and services in nagios. - + This is a 'OK, NAGIOS, GO'' command """ cmd = 'ENABLE_NOTIFICATIONS' self._write_command(self._fmt_notif_str(cmd)) - + def nagios_cmd(self, cmd): """ This sends an arbitrary command to nagios - + It prepends the submitted time and appends a \n - + You just have to provide the properly formatted command """ - + pre = '[%s]' % int(time.time()) - + post = '\n' cmdstr = '%s %s %s' % (pre, cmd, post) self._write_command(cmdstr) - + def act(self): """ Figure out what you want to do from ansible, and then do the @@ -835,6 +853,9 @@ class Nagios(object): self.schedule_svc_downtime(self.host, services=self.services, minutes=self.minutes) + if self.action == "servicegroup_downtime": + if self.services == 'servicegroup': + self.schedule_servicegroup_host_downtime(self, self.servicegroup, minutes=30) # toggle the host AND service alerts elif self.action == 'silence': @@ -859,13 +880,13 @@ class Nagios(object): services=self.services) elif self.action == 'silence_nagios': self.silence_nagios() - + elif self.action == 'unsilence_nagios': self.unsilence_nagios() - + elif self.action == 'command': self.nagios_cmd(self.command) - + # wtf? 
else: self.module.fail_json(msg="unknown action specified: '%s'" % \ From b0af1be84acf92da61787c38e2fa80d53f46263b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20Gr=C3=B6ning?= Date: Fri, 7 Nov 2014 14:36:04 +0100 Subject: [PATCH 011/720] divided between host an service downtimes --- monitoring/nagios.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 3d46d74ef48..0044fbd77a4 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -33,7 +33,8 @@ options: required: true default: null choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", - "silence_nagios", "unsilence_nagios", "command" ] + "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime", + "servicegroup_host_downtime" ] host: description: - Host to operate on in Nagios. @@ -90,6 +91,12 @@ EXAMPLES = ''' # schedule downtime for a few services - nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }} +# set 30 minutes downtime for all services in servicegroup foo +- nagios: action=servicegroup_service_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }} + +# set 30 minutes downtime for all host in servicegroup foo +- nagios: action=servicegroup_host_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }} + # enable SMART disk alerts - nagios: action=enable_alerts service=smart host={{ inventory_hostname }} @@ -169,9 +176,11 @@ def main(): 'silence_nagios', 'unsilence_nagios', 'command', - 'servicegroup_downtime' + 'servicegroup_host_downtime', + 'servicegroup_service_downtime', ] + module = AnsibleModule( argument_spec=dict( action=dict(required=True, default=None, choices=ACTION_CHOICES), @@ -222,8 +231,8 @@ def main(): ###################################################################### - if action == 'servicegroup_downtime': - # Make sure there's an actual service selected + if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']: + # Make sure there's an actual servicegroup selected if not servicegroup: module.fail_json(msg='no servicegroup selected to set downtime for') # Make sure minutes is a number @@ -853,7 +862,10 @@ class Nagios(object): self.schedule_svc_downtime(self.host, services=self.services, minutes=self.minutes) - if self.action == "servicegroup_downtime": + elif self.action == "servicegroup_host_downtime": + if self.services == 'servicegroup': + self.schedule_servicegroup_host_downtime(self, self.servicegroup, minutes=30) + elif self.action == "servicegroup_service_downtime": if self.services == 'servicegroup': self.schedule_servicegroup_host_downtime(self, self.servicegroup, minutes=30) From ebda36bb5054fc577a422de6062c24e3083cafcd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20Gr=C3=B6ning?= Date: Fri, 7 Nov 2014 15:00:57 +0100 Subject: [PATCH 012/720] improved docs --- monitoring/nagios.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 0044fbd77a4..1ddde5b5b1c 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -66,6 +66,10 @@ options: aliases: [ "service" ] required: true default: null + servicegroup: + description: + - the Servicegroup we want to set downtimes/alerts for. + B(Required) option when using the C(servicegroup_service_downtime) amd C(servicegroup_host_downtime). 
command: description: - The raw command to send to nagios, which From 0fa856d467eb839de41a377cf36ce722062fe810 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20Gr=C3=B6ning?= Date: Fri, 7 Nov 2014 17:16:48 +0100 Subject: [PATCH 013/720] fix bugs --- monitoring/nagios.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 1ddde5b5b1c..f0904a44e9c 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -217,7 +217,7 @@ def main(): # 'minutes' and 'service' manually. ################################################################## - if action not in ['command', 'silence_nagios', 'unsilence_nagios', 'servicegroup_downtime']: + if action not in ['command', 'silence_nagios', 'unsilence_nagios']: if not host: module.fail_json(msg='no host specified for action requiring one') ###################################################################### @@ -289,7 +289,7 @@ class Nagios(object): self.action = kwargs['action'] self.author = kwargs['author'] self.host = kwargs['host'] - self.service_group = kwargs['servicegroup'] + self.servicegroup = kwargs['servicegroup'] self.minutes = int(kwargs['minutes']) self.cmdfile = kwargs['cmdfile'] self.command = kwargs['command'] @@ -867,11 +867,11 @@ class Nagios(object): services=self.services, minutes=self.minutes) elif self.action == "servicegroup_host_downtime": - if self.services == 'servicegroup': - self.schedule_servicegroup_host_downtime(self, self.servicegroup, minutes=30) + if self.servicegroup: + self.schedule_servicegroup_host_downtime(servicegroup = self.servicegroup, minutes = self.minutes) elif self.action == "servicegroup_service_downtime": - if self.services == 'servicegroup': - self.schedule_servicegroup_host_downtime(self, self.servicegroup, minutes=30) + if self.servicegroup: + self.schedule_servicegroup_svc_downtime(servicegroup = self.servicegroup, minutes = self.minutes) # toggle the host AND service alerts elif self.action == 'silence': From 1e3645a9e3ef63a8bfb9bcc71e586058be3fcf28 Mon Sep 17 00:00:00 2001 From: Nicolas Brisac Date: Fri, 14 Nov 2014 17:09:24 +0100 Subject: [PATCH 014/720] Allow filtering of routed/forwarded packets MAN page states the following : Rules for traffic not destined for the host itself but instead for traffic that should be routed/forwarded through the firewall should specify the route keyword before the rule (routing rules differ significantly from PF syntax and instead take into account netfilter FORWARD chain conventions). For example: ufw route allow in on eth1 out on eth2 This commit introduces a new parameter "route=yes/no" to allow just that. --- system/ufw.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/system/ufw.py b/system/ufw.py index a49aa8c3a49..5500bae0573 100644 --- a/system/ufw.py +++ b/system/ufw.py @@ -113,6 +113,11 @@ options: - Specify interface for rule. required: false aliases: ['if'] + route: + description: + - Apply the rule to routed/forwarded packets. + required: false + choices: ['yes', 'no'] ''' EXAMPLES = ''' @@ -162,6 +167,10 @@ ufw: rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 # Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host. # Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work. ufw: rule=deny proto=tcp src=2001:db8::/32 port=25 + +# Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24. 
+# Can be used to further restrict a global FORWARD policy set to allow +ufw: rule=deny route=yes src=1.2.3.0/24 dest=4.5.6.0/24 ''' from operator import itemgetter @@ -175,6 +184,7 @@ def main(): logging = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']), direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing', 'routed']), delete = dict(default=False, type='bool'), + route = dict(default=False, type='bool'), insert = dict(default=None), rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']), interface = dict(default=None, aliases=['if']), @@ -238,10 +248,11 @@ def main(): elif command == 'rule': # Rules are constructed according to the long format # - # ufw [--dry-run] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ + # ufw [--dry-run] [delete] [insert NUM] [route] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \ # [proto protocol] [app application] cmd.append([module.boolean(params['delete']), 'delete']) + cmd.append([module.boolean(params['route']), 'route']) cmd.append([params['insert'], "insert %s" % params['insert']]) cmd.append([value]) cmd.append([module.boolean(params['log']), 'log']) From 040135dbbacf51b37f0be75b5348379d8bec2815 Mon Sep 17 00:00:00 2001 From: Sebastian Gumprich Date: Thu, 20 Nov 2014 20:48:41 +0000 Subject: [PATCH 015/720] Added documentation and example for port ranges. Also added punctuation marks. --- system/firewalld.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/system/firewalld.py b/system/firewalld.py index 22db165aad3..81e7925929d 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -23,22 +23,22 @@ DOCUMENTATION = ''' module: firewalld short_description: Manage arbitrary ports/services with firewalld description: - - This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules + - This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules. version_added: "1.4" options: service: description: - - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services" + - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services." required: false default: null port: description: - - "Name of a port to add/remove to/from firewalld must be in the form PORT/PROTOCOL" + - "Name of a port or port range to add/remove to/from firewalld. Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges." required: false default: null rich_rule: description: - - "Rich rule to add/remove to/from firewalld" + - "Rich rule to add/remove to/from firewalld." required: false default: null zone: @@ -49,21 +49,21 @@ options: choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block"] permanent: description: - - "Should this configuration be in the running firewalld configuration or persist across reboots" + - "Should this configuration be in the running firewalld configuration or persist across reboots." required: true default: true state: description: - - "Should this port accept(enabled) or reject(disabled) connections" + - "Should this port accept(enabled) or reject(disabled) connections." 
required: true default: enabled timeout: description: - - "The amount of time the rule should be in effect for when non-permanent" + - "The amount of time the rule should be in effect for when non-permanent." required: false default: 0 notes: - - Not tested on any debian based system + - Not tested on any debian based system. requirements: [ firewalld >= 0.2.11 ] author: Adam Miller ''' @@ -71,6 +71,7 @@ author: Adam Miller EXAMPLES = ''' - firewalld: service=https permanent=true state=enabled - firewalld: port=8081/tcp permanent=true state=disabled +- firewalld: port=161-162/udp permanent=true state=enabled - firewalld: zone=dmz service=http permanent=true state=enabled - firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled ''' From 5f870b094b4e682c654ff6c298f4dd3b9e5dd486 Mon Sep 17 00:00:00 2001 From: Timothy Vandenbrande Date: Fri, 21 Nov 2014 14:26:47 +0100 Subject: [PATCH 016/720] added a source/network add/remove to/from zone for firewalld --- system/firewalld.py | 55 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/system/firewalld.py b/system/firewalld.py index 22db165aad3..ec4be051c9e 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -41,6 +41,11 @@ options: - "Rich rule to add/remove to/from firewalld" required: false default: null + source: + description: + - 'The source/network you would like to add/remove to/from firewalld' + required: false + default: null zone: description: - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).' @@ -73,6 +78,7 @@ EXAMPLES = ''' - firewalld: port=8081/tcp permanent=true state=disabled - firewalld: zone=dmz service=http permanent=true state=enabled - firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled +- firewalld: source='192.168.1.0/24' zone=internal state=enabled ''' import os @@ -128,7 +134,29 @@ def set_port_disabled_permanent(zone, port, protocol): fw_settings = fw_zone.getSettings() fw_settings.removePort(port, protocol) fw_zone.update(fw_settings) - + +#################### +# source handling +# +def get_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + if source in fw_settings.getSources(): + return True + else: + return False + +def add_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.addSource(source) + fw_zone.update(fw_settings) + +def remove_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.removeSource(source) + fw_zone.update(fw_settings) #################### # service handling @@ -210,12 +238,15 @@ def main(): port=dict(required=False,default=None), rich_rule=dict(required=False,default=None), zone=dict(required=False,default=None), - permanent=dict(type='bool',required=True), + source=dict(required=False,default=None), + permanent=dict(type='bool',required=False,default=None), state=dict(choices=['enabled', 'disabled'], required=True), timeout=dict(type='int',required=False,default=0), ), supports_check_mode=True ) + if module.params['source'] == None and module.params['permanent'] == None: + module.fail(msg='permanent is a required parameter') ## Pre-run version checking if 
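The firewalld port documentation above now allows both a single port and a range. A small sketch of what that syntax accepts, assuming only the PORT/PROTOCOL and PORT-PORT/PROTOCOL forms described there; the module itself simply splits the value on '/'.

import re

PORT_SPEC = re.compile(r'^(\d+)(?:-(\d+))?/(tcp|udp)$')

def parse_port_spec(spec):
    match = PORT_SPEC.match(spec)
    if match is None:
        raise ValueError('expected PORT/PROTOCOL or PORT-PORT/PROTOCOL, got %r' % spec)
    if match.group(2) is None:
        port = match.group(1)
    else:
        port = '%s-%s' % (match.group(1), match.group(2))
    return port, match.group(3)

# parse_port_spec('161-162/udp') returns ('161-162', 'udp')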
FW_VERSION < "0.2.11": @@ -226,6 +257,7 @@ def main(): msgs = [] service = module.params['service'] rich_rule = module.params['rich_rule'] + source = module.params['source'] if module.params['port'] != None: port, protocol = module.params['port'].split('/') @@ -304,6 +336,25 @@ def main(): if changed == True: msgs.append("Changed service %s to %s" % (service, desired_state)) + if source != None: + is_enabled = get_source(zone, source) + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + add_source(zone, source) + changed=True + msgs.append("Added %s to zone %s" % (source, zone)) + elif desired_state == "disabled": + if is_enabled == True: + msgs.append("source is present") + if module.check_mode: + module.exit_json(changed=True) + + remove_source(zone, source) + changed=True + msgs.append("Removed %s from zone %s" % (source, zone)) if port != None: if permanent: is_enabled = get_port_enabled_permanent(zone, [port, protocol]) From d6fbfdefd5ced3c8db63f0bef14900a816fddb5b Mon Sep 17 00:00:00 2001 From: Timothy Vandenbrande Date: Fri, 21 Nov 2014 15:39:07 +0100 Subject: [PATCH 017/720] added a source/network add/remove to/from zone for firewalld - removed useless comment --- system/firewalld.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/system/firewalld.py b/system/firewalld.py index ec4be051c9e..ed49f0860be 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -150,13 +150,11 @@ def add_source(zone, source): fw_zone = fw.config().getZoneByName(zone) fw_settings = fw_zone.getSettings() fw_settings.addSource(source) - fw_zone.update(fw_settings) def remove_source(zone, source): fw_zone = fw.config().getZoneByName(zone) fw_settings = fw_zone.getSettings() fw_settings.removeSource(source) - fw_zone.update(fw_settings) #################### # service handling @@ -348,7 +346,6 @@ def main(): msgs.append("Added %s to zone %s" % (source, zone)) elif desired_state == "disabled": if is_enabled == True: - msgs.append("source is present") if module.check_mode: module.exit_json(changed=True) From ca94781d5c29f78f6a380a024821ba8360b67b78 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 25 Nov 2014 15:50:27 -0600 Subject: [PATCH 018/720] Adding VERSION file for 1.8.0 --- VERSION | 1 + 1 file changed, 1 insertion(+) create mode 100644 VERSION diff --git a/VERSION b/VERSION new file mode 100644 index 00000000000..27f9cd322bb --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.8.0 From cf54dc46b49adfccf377d646727d922cf7c7d659 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 26 Nov 2014 21:32:16 -0600 Subject: [PATCH 019/720] Version bump for extras release 1.8.1 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 27f9cd322bb..a8fdfda1c78 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.8.0 +1.8.1 From 0957768755f6d9eb1b93641326fc3ce93f19c497 Mon Sep 17 00:00:00 2001 From: Balazs Pocze Date: Mon, 1 Dec 2014 15:16:40 +0100 Subject: [PATCH 020/720] gtid_replication parameter added to keep this module working in environments which are using GTID replication --- database/mysql/mysql_replication.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index b93150a43b5..9ee71aa6021 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -103,7 +103,10 @@ options: master_ssl_cipher: description: - same as mysql variable - + gtid_replication: + 
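The source block added to main() above follows the same reconcile flow the module applies to services and ports: compare the current state to the desired state, honour check mode, and only then mutate the zone. A standalone sketch of that decision flow (the helper names are illustrative):

def reconcile(desired_state, is_enabled, check_mode, apply_change):
    # Returns (changed, message) for a single item such as a zone source.
    if desired_state == 'enabled' and not is_enabled:
        if not check_mode:
            apply_change('add')
        return True, 'added'
    if desired_state == 'disabled' and is_enabled:
        if not check_mode:
            apply_change('remove')
        return True, 'removed'
    return False, 'no change'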
descrtiption: + - does the host uses GTID based replication or not + possible values: 0,1 ''' EXAMPLES = ''' @@ -232,6 +235,7 @@ def main(): login_host=dict(default="localhost"), login_unix_socket=dict(default=None), mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]), + gtid_replication=dict(default=None, choices=['0', '1']), master_host=dict(default=None), master_user=dict(default=None), master_password=dict(default=None), @@ -268,6 +272,7 @@ def main(): master_ssl_cert = module.params["master_ssl_cert"] master_ssl_key = module.params["master_ssl_key"] master_ssl_cipher = module.params["master_ssl_cipher"] + gtid_replication = module.params["gtid_replication"] if not mysqldb_found: module.fail_json(msg="the python mysqldb module is required") @@ -364,6 +369,8 @@ def main(): if master_ssl_cipher: chm.append("MASTER_SSL_CIPHER=%(master_ssl_cipher)s") chm_params['master_ssl_cipher'] = master_ssl_cipher + if gtid_replication: + chm.append("MASTER_AUTO_POSITION = 1") changemaster(cursor, chm, chm_params) module.exit_json(changed=True) elif mode in "startslave": From 76142ddb9721143f44b36b7507393e0413fcbdbc Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Thu, 4 Dec 2014 11:25:06 +0100 Subject: [PATCH 021/720] Allow multiple versions in rpm state Fix bug in ansible get_package_state and get_current_version that breaks when there are multiple versions of a package installed and there is a list of packages to install. The previous implementation used 'zip' to match requested names to installed names which fails, because rpm outputs multiple lines per package when there are multiple versions. Testcase: Install opensuse, install multiple kernel versions (happens by update) Before patch: calling zypper: state=present for name={{item}} with_items: - kernel-desktop - git leads to ansible aborting. After the patch ansible performs as expected and makes sure both packages are present. Also the last version number is used for further update information in this version (before if only one package name was given the oldest version number was used). --- packaging/os/zypper.py | 48 ++++++++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index 87bbcd1f135..7091145423b 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -95,25 +95,31 @@ def zypper_version(module): return rc, stderr # Function used for getting versions of currently installed packages. -def get_current_version(m, name): +def get_current_version( packages): cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n'] - cmd.extend(name) - (rc, stdout, stderr) = m.run_command(cmd) + cmd.extend(packages) + + stdout = subprocess.check_output(cmd) current_version = {} rpmoutput_re = re.compile('^(\S+) (\S+)$') - for stdoutline, package in zip(stdout.splitlines(), name): - m = rpmoutput_re.match(stdoutline) - if m == None: + + for stdoutline in stdout.splitlines(): + match = rpmoutput_re.match(stdoutline) + if match == None: return None - rpmpackage = m.group(1) - rpmversion = m.group(2) - if package != rpmpackage: + package = match.group(1) + version = match.group(2) + current_version[package] = version + + for package in packages: + if package not in current_version: + print package + ' was not returned by rpm \n' return None - current_version[package] = rpmversion return current_version + # Function used to find out if a package is currently installed. 
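The zypper rework above drops the one-line-per-package assumption: rpm prints one line per installed version, so the output has to be parsed into a dict keyed by package name, with the last reported version winning. A minimal sketch of that parse using the same --qf format; the version strings in the usage comment are made-up examples.

import re

def parse_rpm_versions(stdout):
    rpmoutput_re = re.compile(r'^(\S+) (\S+)$')
    versions = {}
    for line in stdout.splitlines():
        match = rpmoutput_re.match(line)
        if match is None:
            return None
        versions[match.group(1)] = match.group(2)
    return versions

# parse_rpm_versions('kernel-desktop 3.16.7-7.1\nkernel-desktop 3.16.6-2.1\ngit 2.1.4-1.2\n')
# returns {'kernel-desktop': '3.16.6-2.1', 'git': '2.1.4-1.2'}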
def get_package_state(m, packages): cmd = ['/bin/rpm', '--query', '--qf', 'package %{NAME} is installed\n'] @@ -123,19 +129,21 @@ def get_package_state(m, packages): installed_state = {} rpmoutput_re = re.compile('^package (\S+) (.*)$') - for stdoutline, name in zip(stdout.splitlines(), packages): - m = rpmoutput_re.match(stdoutline) - if m == None: - return None - package = m.group(1) - result = m.group(2) - if not name.startswith(package): - print name + ':' + package + ':' + stdoutline + '\n' + for stdoutline in stdout.splitlines(): + match = rpmoutput_re.match(stdoutline) + if match == None: return None + package = match.group(1) + result = match.group(2) if result == 'is installed': - installed_state[name] = True + installed_state[package] = True else: - installed_state[name] = False + installed_state[package] = False + + for package in packages: + if package not in installed_state: + print package + ' was not returned by rpm \n' + return None return installed_state From 7948b91bad04f0f3a13dc44bacaf74dc7464b24b Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Thu, 4 Dec 2014 11:28:18 +0100 Subject: [PATCH 022/720] fix local change --- packaging/os/zypper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index 7091145423b..c848e86fcc6 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -95,11 +95,11 @@ def zypper_version(module): return rc, stderr # Function used for getting versions of currently installed packages. -def get_current_version( packages): +def get_current_version(m, packages): cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n'] cmd.extend(packages) - stdout = subprocess.check_output(cmd) + rc, stdout, stderr = m.run_command(cmd, check_rc=False) current_version = {} rpmoutput_re = re.compile('^(\S+) (\S+)$') From c60441fddd7433c9d258b3837be1669f4d73e725 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 4 Dec 2014 15:50:48 -0600 Subject: [PATCH 023/720] Version bump for 1.8.2 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index a8fdfda1c78..53adb84c822 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.8.1 +1.8.2 From 797d8893d65a8a214c68c82be3ccec11462077ff Mon Sep 17 00:00:00 2001 From: Jason Holland Date: Tue, 25 Nov 2014 14:43:47 -0600 Subject: [PATCH 024/720] Fix some logical issues with enabling/disabling a server on the A10. 
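The get_package_state rework in the zypper patch above follows the same idea: parse every output line and then verify that each requested package was actually reported, rather than zipping output lines against the request list. A sketch under the same assumptions about rpm's output:

import re

def parse_install_state(stdout, packages):
    # rpm prints 'package NAME is installed' or 'package NAME is not installed'.
    state_re = re.compile(r'^package (\S+) (.*)$')
    installed = {}
    for line in stdout.splitlines():
        match = state_re.match(line)
        if match is None:
            return None
        installed[match.group(1)] = (match.group(2) == 'is installed')
    if any(p not in installed for p in packages):
        return None
    return installed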
--- network/a10/a10_server.py | 51 +++++++++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 15 deletions(-) diff --git a/network/a10/a10_server.py b/network/a10/a10_server.py index 65410536eef..109828772c1 100644 --- a/network/a10/a10_server.py +++ b/network/a10/a10_server.py @@ -183,28 +183,35 @@ def main(): json_post = { 'server': { - 'name': slb_server, - 'host': slb_server_ip, - 'status': axapi_enabled_disabled(slb_server_status), - 'port_list': slb_server_ports, + 'name': slb_server, } } + # add optional module parameters + if slb_server_ip: + json_post['server']['host'] = slb_server_ip + + if slb_server_ports: + json_post['server']['port_list'] = slb_server_ports + + if slb_server_status: + json_post['server']['status'] = axapi_enabled_disabled(slb_server_status) + slb_server_data = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server})) slb_server_exists = not axapi_failure(slb_server_data) changed = False if state == 'present': - if not slb_server_ip: - module.fail_json(msg='you must specify an IP address when creating a server') - if not slb_server_exists: + if not slb_server_ip: + module.fail_json(msg='you must specify an IP address when creating a server') + result = axapi_call(module, session_url + '&method=slb.server.create', json.dumps(json_post)) if axapi_failure(result): module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg']) changed = True else: - def needs_update(src_ports, dst_ports): + def port_needs_update(src_ports, dst_ports): ''' Checks to determine if the port definitions of the src_ports array are in or different from those in dst_ports. If there is @@ -227,12 +234,26 @@ def main(): # every port from the src exists in the dst, and none of them were different return False + def status_needs_update(current_status, new_status): + ''' + Check to determine if we want to change the status of a server. + If there is a difference between the current status of the server and + the desired status, return true, otherwise false. 
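port_needs_update in the A10 change above treats the port list as needing an update when any user-supplied entry is missing from, or differs from, what the device reports, and status_needs_update is a plain inequality check. A standalone sketch of that comparison; the 'port_num' field name is an assumption for illustration, not necessarily the aXAPI field.

def port_needs_update(src_ports, dst_ports):
    dst_by_num = dict((p.get('port_num'), p) for p in dst_ports)
    for port in src_ports:
        other = dst_by_num.get(port.get('port_num'))
        if other is None or other != port:
            return True
    return False

def status_needs_update(current_status, new_status):
    return current_status != new_status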
+ ''' + if current_status != new_status: + return True + return False + defined_ports = slb_server_data.get('server', {}).get('port_list', []) + current_status = slb_server_data.get('server', {}).get('status') - # we check for a needed update both ways, in case ports - # are missing from either the ones specified by the user - # or from those on the device - if needs_update(defined_ports, slb_server_ports) or needs_update(slb_server_ports, defined_ports): + # we check for a needed update several ways + # - in case ports are missing from the ones specified by the user + # - in case ports are missing from those on the device + # - in case we are change the status of a server + if port_needs_update(defined_ports, slb_server_ports) + or port_needs_update(slb_server_ports, defined_ports) + or status_needs_update(current_status, axapi_enabled_disabled(slb_server_status)): result = axapi_call(module, session_url + '&method=slb.server.update', json.dumps(json_post)) if axapi_failure(result): module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg']) @@ -249,10 +270,10 @@ def main(): result = axapi_call(module, session_url + '&method=slb.server.delete', json.dumps({'name': slb_server})) changed = True else: - result = dict(msg="the server was not present") + result = dict(msg="the server was not present") - # if the config has changed, save the config unless otherwise requested - if changed and write_config: + # if the config has changed, or we want to force a save, save the config unless otherwise requested + if changed or write_config: write_result = axapi_call(module, session_url + '&method=system.action.write_memory') if axapi_failure(write_result): module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg']) From 1011565282715e943062cbb17d395f86738f3626 Mon Sep 17 00:00:00 2001 From: Jason Holland Date: Thu, 4 Dec 2014 16:15:23 -0600 Subject: [PATCH 025/720] Fix small issue with wrapping syntax --- network/a10/a10_server.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/network/a10/a10_server.py b/network/a10/a10_server.py index 109828772c1..7df1d6d8c9e 100644 --- a/network/a10/a10_server.py +++ b/network/a10/a10_server.py @@ -251,9 +251,7 @@ def main(): # - in case ports are missing from the ones specified by the user # - in case ports are missing from those on the device # - in case we are change the status of a server - if port_needs_update(defined_ports, slb_server_ports) - or port_needs_update(slb_server_ports, defined_ports) - or status_needs_update(current_status, axapi_enabled_disabled(slb_server_status)): + if port_needs_update(defined_ports, slb_server_ports) or port_needs_update(slb_server_ports, defined_ports) or status_needs_update(current_status, axapi_enabled_disabled(slb_server_status)): result = axapi_call(module, session_url + '&method=slb.server.update', json.dumps(json_post)) if axapi_failure(result): module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg']) From 226144512da5aa3e2d2704f833c83a5228d93596 Mon Sep 17 00:00:00 2001 From: Chris Schmidt Date: Thu, 11 Dec 2014 17:21:38 -0700 Subject: [PATCH 026/720] Renamted module from download_artifact to maven_artifact --- packaging/{download_artifact.py => maven_artifact.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename packaging/{download_artifact.py => maven_artifact.py} (100%) diff --git a/packaging/download_artifact.py b/packaging/maven_artifact.py similarity index 100% rename from 
packaging/download_artifact.py rename to packaging/maven_artifact.py From 9498d3de9c09c73fcf47a30bc1db22994b04327a Mon Sep 17 00:00:00 2001 From: Chris Schmidt Date: Thu, 11 Dec 2014 17:23:25 -0700 Subject: [PATCH 027/720] Renamd Module File --- packaging/maven_artifact.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/packaging/maven_artifact.py b/packaging/maven_artifact.py index 741646dcdf3..0c8070d1e46 100755 --- a/packaging/maven_artifact.py +++ b/packaging/maven_artifact.py @@ -30,7 +30,7 @@ import base64 DOCUMENTATION = ''' --- -module: download_artifact +module: maven_artifact short_description: Downloads an Artifact from a Maven Repository version_added: "historical" description: @@ -97,20 +97,20 @@ options: EXAMPLES = ''' # Download the latest version of the commons-collections artifact from Maven Central -- download_artifact: group_id=org.apache.commons artifact_id=commons-collections target=/tmp/commons-collections-latest.jar +- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections target=/tmp/commons-collections-latest.jar # Download Apache Commons-Collections 3.2 from Maven Central -- download_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 target=/tmp/commons-collections-3.2.jar +- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 target=/tmp/commons-collections-3.2.jar # Download an artifact from a private repository requiring authentication -- download_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass target=/tmp/library-name-latest.jar +- maven_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass target=/tmp/library-name-latest.jar # Download a WAR File to the Tomcat webapps directory to be deployed -- download_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven target=/var/lib/tomcat7/webapps/web-app.war +- maven_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven target=/var/lib/tomcat7/webapps/web-app.war ''' class Artifact(object): - def __init__(self, group_id, artifact_id, version, classifier=None, extension=jar): + def __init__(self, group_id, artifact_id, version, classifier=None, extension='jar'): if not group_id: raise ValueError("group_id must be set") if not artifact_id: From 6fab8f49a965c708be9ac2290c074d050d6a6832 Mon Sep 17 00:00:00 2001 From: Timothy Vandenbrande Date: Fri, 21 Nov 2014 14:26:47 +0100 Subject: [PATCH 028/720] added a source/network add/remove to/from zone for firewalld --- system/firewalld.py | 55 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/system/firewalld.py b/system/firewalld.py index dedc9260740..ace5e5fd1e4 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -41,6 +41,11 @@ options: - "Rich rule to add/remove to/from firewalld" required: false default: null + source: + description: + - 'The source/network you would like to add/remove to/from firewalld' + required: false + default: null zone: description: - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).' 
@@ -77,6 +82,7 @@ EXAMPLES = ''' - firewalld: port=8081/tcp permanent=true state=disabled - firewalld: zone=dmz service=http permanent=true state=enabled - firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled +- firewalld: source='192.168.1.0/24' zone=internal state=enabled ''' import os @@ -132,7 +138,29 @@ def set_port_disabled_permanent(zone, port, protocol): fw_settings = fw_zone.getSettings() fw_settings.removePort(port, protocol) fw_zone.update(fw_settings) - + +#################### +# source handling +# +def get_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + if source in fw_settings.getSources(): + return True + else: + return False + +def add_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.addSource(source) + fw_zone.update(fw_settings) + +def remove_source(zone, source): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.removeSource(source) + fw_zone.update(fw_settings) #################### # service handling @@ -214,13 +242,16 @@ def main(): port=dict(required=False,default=None), rich_rule=dict(required=False,default=None), zone=dict(required=False,default=None), - permanent=dict(type='bool',required=True), immediate=dict(type='bool',default=False), + source=dict(required=False,default=None), + permanent=dict(type='bool',required=False,default=None), state=dict(choices=['enabled', 'disabled'], required=True), timeout=dict(type='int',required=False,default=0), ), supports_check_mode=True ) + if module.params['source'] == None and module.params['permanent'] == None: + module.fail(msg='permanent is a required parameter') ## Pre-run version checking if FW_VERSION < "0.2.11": @@ -231,6 +262,7 @@ def main(): msgs = [] service = module.params['service'] rich_rule = module.params['rich_rule'] + source = module.params['source'] if module.params['port'] != None: port, protocol = module.params['port'].split('/') @@ -310,6 +342,25 @@ def main(): if changed == True: msgs.append("Changed service %s to %s" % (service, desired_state)) + if source != None: + is_enabled = get_source(zone, source) + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + add_source(zone, source) + changed=True + msgs.append("Added %s to zone %s" % (source, zone)) + elif desired_state == "disabled": + if is_enabled == True: + msgs.append("source is present") + if module.check_mode: + module.exit_json(changed=True) + + remove_source(zone, source) + changed=True + msgs.append("Removed %s from zone %s" % (source, zone)) if port != None: if permanent: is_enabled = get_port_enabled_permanent(zone, [port, protocol]) From b365fc44645a4d81b7e7780708a4b7dd24faf1ce Mon Sep 17 00:00:00 2001 From: Timothy Vandenbrande Date: Fri, 21 Nov 2014 15:39:07 +0100 Subject: [PATCH 029/720] added a source/network add/remove to/from zone for firewalld - removed useless comment --- system/firewalld.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/system/firewalld.py b/system/firewalld.py index ace5e5fd1e4..cf90c5ace56 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -154,13 +154,11 @@ def add_source(zone, source): fw_zone = fw.config().getZoneByName(zone) fw_settings = fw_zone.getSettings() fw_settings.addSource(source) - fw_zone.update(fw_settings) def remove_source(zone, source): fw_zone = fw.config().getZoneByName(zone) fw_settings 
= fw_zone.getSettings() fw_settings.removeSource(source) - fw_zone.update(fw_settings) #################### # service handling @@ -354,7 +352,6 @@ def main(): msgs.append("Added %s to zone %s" % (source, zone)) elif desired_state == "disabled": if is_enabled == True: - msgs.append("source is present") if module.check_mode: module.exit_json(changed=True) From 1bb8abffa367d9c186dd3814bc7c5221dd885526 Mon Sep 17 00:00:00 2001 From: Chris Schmidt Date: Wed, 7 Jan 2015 00:11:16 -0700 Subject: [PATCH 030/720] Changed "target" to "dest" --- packaging/maven_artifact.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/packaging/maven_artifact.py b/packaging/maven_artifact.py index 0c8070d1e46..f02ad166d2b 100755 --- a/packaging/maven_artifact.py +++ b/packaging/maven_artifact.py @@ -82,7 +82,7 @@ options: required: false default: null version_added: 0.0.1 - target: + dest: description: The path where the artifact should be written to required: true default: false @@ -97,16 +97,16 @@ options: EXAMPLES = ''' # Download the latest version of the commons-collections artifact from Maven Central -- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections target=/tmp/commons-collections-latest.jar +- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections dest=/tmp/commons-collections-latest.jar # Download Apache Commons-Collections 3.2 from Maven Central -- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 target=/tmp/commons-collections-3.2.jar +- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 dest=/tmp/commons-collections-3.2.jar # Download an artifact from a private repository requiring authentication -- maven_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass target=/tmp/library-name-latest.jar +- maven_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass dest=/tmp/library-name-latest.jar # Download a WAR File to the Tomcat webapps directory to be deployed -- maven_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven target=/var/lib/tomcat7/webapps/web-app.war +- maven_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven dest=/var/lib/tomcat7/webapps/web-app.war ''' class Artifact(object): @@ -321,8 +321,8 @@ def main(): repository_url = dict(default=None), username = dict(default=None), password = dict(default=None), - state = dict(default="latest", choices=["present","absent"]), # TODO - Implement a "latest" state - target = dict(default=None), + state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state + dest = dict(default=None), ) ) @@ -335,7 +335,7 @@ def main(): repository_username = module.params["username"] repository_password = module.params["password"] state = module.params["state"] - target = module.params["target"] + dest = module.params["dest"] if not repository_url: repository_url = "http://repo1.maven.org/maven2" @@ -348,19 +348,19 @@ def main(): module.fail_json(msg=e.args[0]) prev_state = "absent" - if os.path.lexists(target): + if os.path.lexists(dest): prev_state = "present" else: - path = os.path.dirname(target) + path = os.path.dirname(dest) if not os.path.exists(path): os.makedirs(path) if prev_state == 
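The maven_artifact parameter rename above ("target" to "dest") is refined in the follow-up fix below, which handles the case where dest is a directory by deriving the file name from the artifact id and version with a hard-coded jar extension. That handling amounts to the sketch here, written with os.path.join for illustration.

import os

def resolve_dest(dest, artifact_id, version):
    # When dest is a directory, append 'ARTIFACT-VERSION.jar' to it.
    if os.path.isdir(dest):
        return os.path.join(dest, '%s-%s.jar' % (artifact_id, version))
    return dest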
"present": - module.exit_json(target=target, state=state, changed=False) + module.exit_json(dest=dest, state=state, changed=False) try: if downloader.download(artifact, target): - module.exit_json(state=state, target=target, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True) + module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True) else: module.fail_json(msg="Unable to download the artifact") except ValueError as e: From 44bfe5a7d6c85ad2a94cda25fcff28af5d115c7b Mon Sep 17 00:00:00 2001 From: Chris Schmidt Date: Wed, 7 Jan 2015 00:35:48 -0700 Subject: [PATCH 031/720] Fixed bug where passing a directory as dest failed --- packaging/maven_artifact.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packaging/maven_artifact.py b/packaging/maven_artifact.py index f02ad166d2b..bf4ca59f92c 100755 --- a/packaging/maven_artifact.py +++ b/packaging/maven_artifact.py @@ -348,6 +348,9 @@ def main(): module.fail_json(msg=e.args[0]) prev_state = "absent" + if os.path.isdir(dest): + dest = dest + "/" + artifact_id + "-" + version + ".jar" + if os.path.lexists(dest): prev_state = "present" else: @@ -359,7 +362,7 @@ def main(): module.exit_json(dest=dest, state=state, changed=False) try: - if downloader.download(artifact, target): + if downloader.download(artifact, dest): module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True) else: module.fail_json(msg="Unable to download the artifact") From 1d60d33dc1287235f96fab937708292705303e95 Mon Sep 17 00:00:00 2001 From: Yuri Kunde Schlesner Date: Thu, 8 Jan 2015 21:44:58 -0200 Subject: [PATCH 032/720] crypttab: Fix parameter checking with state=absent Only the `name` parameter is required when removing an entry, but the module tried to ensure at least one other parameter was set. --- system/crypttab.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/crypttab.py b/system/crypttab.py index 70230fa53e1..ffb60516f3d 100644 --- a/system/crypttab.py +++ b/system/crypttab.py @@ -103,7 +103,7 @@ def main(): state = module.params['state'] path = module.params['path'] - if backing_device is None and password is None and opts is None: + if state != 'absent' and backing_device is None and password is None and opts is None: module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'", **module.params) From 555ff23434aa5810f035963c6197596edba14836 Mon Sep 17 00:00:00 2001 From: Billy Kimble Date: Mon, 12 Jan 2015 14:13:08 -0800 Subject: [PATCH 033/720] added hall.com notification module --- notification/hall.py | 97 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100755 notification/hall.py diff --git a/notification/hall.py b/notification/hall.py new file mode 100755 index 00000000000..7c76e52379f --- /dev/null +++ b/notification/hall.py @@ -0,0 +1,97 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Billy Kimble +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION = """ +module: hall +short_description: Send notification to Hall +description: + - The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notication messages to rooms. +version_added: 1.6 +author: Billy Kimble +options: + room_token: + description: + - Room token provided to you by setting up the Ansible room integation on U(https://hall.com) + required: true + msg: + description: + - The message you wish to deliver as a notifcation + required: true + title: + description: + - The title of the message + required: true + picture: + description: + - The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627) + required: false +""" + +EXAMPLES = """ +- name: Send Hall notifiation + local_action: + module: hall + room_token: + title: Nginx + msg: Created virtual host file on {{ inventory_hostname }} + +- name: Send Hall notification if EC2 servers were created. + when: ec2.instances|length > 0 + local_action: + module: hall + room_token: + title: Server Creation + msg: "Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region." + with_items: ec2.instances +""" + +HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s' + +def send_request_to_hall(module, room_token, payload): + headers = {'Content-Type': 'application/json'} + payload=module.jsonify(payload) + api_endpoint = HALL_API_ENDPOINT % (room_token) + response, info = fetch_url(module, api_endpoint, data=payload, headers=headers) + if info['status'] != 200: + secure_url = HALL_API_ENDPOINT % ('[redacted]') + module.fail_json(msg=" failed to send %s to %s: %s" % (payload, secure_url, info['msg'])) + +def main(): + module = AnsibleModule( + argument_spec = dict( + room_token = dict(type='str', required=True), + msg = dict(type='str', required=True), + title = dict(type='str', required=True), + picture = dict(type='str', default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'), + ) + ) + + room_token = module.params['room_token'] + message = module.params['msg'] + title = module.params['title'] + picture = module.params['picture'] + payload = {'title': title, 'message': message, 'picture': picture} + send_request_to_hall(module, room_token, payload) + module.exit_json(msg="OK") + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() From adb1f0a1c8b8d60f5162e3121275decdaedfbf33 Mon Sep 17 00:00:00 2001 From: Rob White Date: Thu, 22 Jan 2015 13:37:37 +1100 Subject: [PATCH 034/720] Convert symlinks specified in pvs to actual path --- system/lvg.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/system/lvg.py b/system/lvg.py index e568e9df677..295ee24e3c6 100644 --- a/system/lvg.py +++ b/system/lvg.py @@ -135,7 +135,9 @@ def main(): elif state == 'present': module.fail_json(msg="No physical volumes given.") - + # LVM always uses real paths not symlinks so replace 
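The hall module above posts a single JSON document to a per-room generic-service endpoint. A standalone sketch of the request it assembles; the room token and field values are placeholders, and the actual POST in the module goes through Ansible's fetch_url helper.

import json

HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'

def build_hall_request(room_token, title, msg, picture):
    # Returns (url, body, headers) for the notification POST.
    url = HALL_API_ENDPOINT % room_token
    body = json.dumps({'title': title, 'message': msg, 'picture': picture})
    return url, body, {'Content-Type': 'application/json'}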
symlinks with actual path + for idx, dev in enumerate(dev_list): + dev_list[idx] = os.path.realpath(dev) if state=='present': ### check given devices From b1629ac77aae85bac95b7b315e82010c6da67bb3 Mon Sep 17 00:00:00 2001 From: Will Date: Thu, 22 Jan 2015 09:05:32 -0500 Subject: [PATCH 035/720] Allow sending direct messages with slack module --- notification/slack.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/notification/slack.py b/notification/slack.py index 5577228978b..b0dc9489182 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -111,7 +111,10 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoj payload = dict(text=text) if channel is not None: - payload['channel'] = channel if (channel[0] == '#') else '#'+channel + if (channel[0] == '#') or (channel[0] == '@') + payload['channel'] = channel + else + payload['channel'] = '#'+channel if username is not None: payload['username'] = username if icon_emoji is not None: From cb46aab3d1444b732095bf7a4f5a8e36fa26d50d Mon Sep 17 00:00:00 2001 From: Giovanni Tirloni Date: Thu, 22 Jan 2015 09:13:12 -0500 Subject: [PATCH 036/720] add createparent option to zfs create --- system/zfs.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/system/zfs.py b/system/zfs.py index 93248897051..cd4c017c303 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -250,7 +250,7 @@ class Zfs(object): if self.module.check_mode: self.changed = True return - properties=self.properties + properties = self.properties volsize = properties.pop('volsize', None) volblocksize = properties.pop('volblocksize', None) if "@" in self.name: @@ -260,6 +260,10 @@ class Zfs(object): cmd = [self.module.get_bin_path('zfs', True)] cmd.append(action) + + if createparent: + cmd.append('-p') + if volblocksize: cmd.append('-b %s' % volblocksize) if properties: @@ -271,7 +275,7 @@ class Zfs(object): cmd.append(self.name) (rc, err, out) = self.module.run_command(' '.join(cmd)) if rc == 0: - self.changed=True + self.changed = True else: self.module.fail_json(msg=out) @@ -345,6 +349,7 @@ def main(): 'checksum': {'required': False, 'choices':['on', 'off', 'fletcher2', 'fletcher4', 'sha256']}, 'compression': {'required': False, 'choices':['on', 'off', 'lzjb', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lz4', 'zle']}, 'copies': {'required': False, 'choices':['1', '2', '3']}, + 'createparent': {'required': False, 'choices':['on', 'off']}, 'dedup': {'required': False, 'choices':['on', 'off']}, 'devices': {'required': False, 'choices':['on', 'off']}, 'exec': {'required': False, 'choices':['on', 'off']}, @@ -396,7 +401,7 @@ def main(): result['name'] = name result['state'] = state - zfs=Zfs(module, name, properties) + zfs = Zfs(module, name, properties) if state == 'present': if zfs.exists(): From ea6c887d6c768d456226ae881bb8b4292bd26058 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Sat, 24 Jan 2015 01:33:53 +0000 Subject: [PATCH 037/720] Initial commit of Ansible support for the Consul clustering framework (http://consul.io). 
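The slack change above is about accepting channels given as '#channel', '@user' for a direct message, or a bare name. The intended normalization, written out as a standalone function:

def normalize_channel(channel):
    # '#'-prefixed channels and '@'-prefixed direct messages pass through
    # unchanged; anything else is treated as a channel name.
    if channel.startswith('#') or channel.startswith('@'):
        return channel
    return '#' + channel

# normalize_channel('deploys') returns '#deploys'; normalize_channel('@alice') returns '@alice'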
Submission includes support for - creating and registering services and checks - reading, writing and lookup for values in consul's kv store - creating and manipulating sessions for distributed locking on values in the kv - creating and manipulating ACLs for restricting access to the kv store - inventory support that reads the Consul catalog and group nodes according to - datacenters - exposed services - service availability - arbitrary groupings from the kv store This submission makes extensive use of the python-consul library and this is required as a dependency and can be installed from pip. The tests were written to target a vagrant cluster which can be setup by following the instructions here http://github.com/sgargan/consul-vagrant --- clustering/consul | 463 ++++++++++++++++++++++++++++++++++++++ clustering/consul_acl | 298 ++++++++++++++++++++++++ clustering/consul_kv | 238 ++++++++++++++++++++ clustering/consul_session | 213 ++++++++++++++++++ 4 files changed, 1212 insertions(+) create mode 100644 clustering/consul create mode 100644 clustering/consul_acl create mode 100644 clustering/consul_kv create mode 100644 clustering/consul_session diff --git a/clustering/consul b/clustering/consul new file mode 100644 index 00000000000..fa1e06c3678 --- /dev/null +++ b/clustering/consul @@ -0,0 +1,463 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: consul +short_description: "Add, modify & delete services within a consul cluster. + See http://conul.io for more details." +description: + - registers services and checks for an agent with a consul cluster. A service + is some process running on the agent node that should be advertised by + consul's discovery mechanism. It may optionally supply a check definition + that will be used to notify the consul cluster of the health of the service. + Checks may also be registered per node e.g. disk usage, or cpu usage and + notify the health of the entire node to the cluster. + Service level checks do not require a check name or id as these are derived + by Consul from the Service name and id respectively by appending 'service:'. + Node level checks require a check_name and optionally a check_id Currently, + there is no complete way to retrieve the script, interval or ttl metadata for + a registered check. Without this metadata it is not possible to tell if + the data supplied with ansible represents a change to a check. As a result + this does not attempt to determine changes and will always report a changed + occurred. An api method is planned to supply this metadata so at that stage + change management will be added. 
+version_added: "1.9" +author: Steve Gargan (steve.gargan@gmail.com) +options: + state: + description: + - register or deregister the consul service, defaults to present + required: true + choices: ['present', 'absent'] + service_id: + description: + - the ID for the service, must be unique per node, defaults to the + service name + required: false + host: + description: + - host of the consul agent with which to register the service, + defaults to localhost + required: false + notes: + description: + - Notes to attach to check when registering it. + service_name: + desciption: + - Unique name for the service on a node, must be unique per node, + required if registering a service. May be ommitted if registering + a node level check + required: false + service_port: + description: + - the port on which the service is listening required for + registration of a service. + required: true + tags: + description: + - a list of tags that will be attached to the service registration. + required: false + script: + description: + - the script/command that will be run periodically to check the health + of the service + required: false + interval: + description: + - the interval at which the service check will be run. This is by + convention a number with a s or m to signify the units of seconds + or minutes. if none is supplied, m will be appended + check_id: + description: + - an ID for the service check, defaults to the check name, ignored if + part of service definition. + check_name: + description: + - a name for the service check, defaults to the check id. required if + standalone, ignored if part of service definition. +""" + +EXAMPLES = ''' + - name: register nginx service with the local consul agent + consul: + name: nginx + port: 80 + + - name: register nginx service with curl check + consul: + name: nginx + port: 80 + script: "curl http://localhost" + interval: 60s + + - name: register nginx with some service tags + consul: + name: nginx + port: 80 + tags: + - prod + - webservers + + - name: remove nginx service + consul: + name: nginx + state: absent + + - name: create a node level check to test disk usage + consul: + check_name: Disk usage + check_id: disk_usage + script: "/opt/disk_usage.py" + interval: 5m + +''' + +import sys +import urllib2 + +try: + import json +except ImportError: + import simplejson as json + +try: + import consul +except ImportError, e: + print "failed=True msg='python-consul required for this module. 
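The interval convention described in the consul documentation above (a bare number defaults to minutes) comes down to a unit-suffix check. A minimal sketch of it:

def normalize_interval(interval):
    # Append 'm' (minutes) unless the value already carries an 's' or 'm' unit.
    interval = str(interval)
    if not (interval.endswith('s') or interval.endswith('m')):
        interval += 'm'
    return interval

# normalize_interval(5) returns '5m'; normalize_interval('60s') returns '60s'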
"\ + "see http://python-consul.readthedocs.org/en/latest/#installation'" + sys.exit(1) + + +def register_with_consul(module): + + state = module.params.get('state') + + if state == 'present': + add(module) + else: + remove(module) + + +def add(module): + ''' adds a service or a check depending on supplied configuration''' + check = parse_check(module) + service = parse_service(module) + + if not service and not check: + module.fail_json(msg='a name and port are required to register a service') + + if service: + if check: + service.add_check(check) + add_service(module, service) + elif check: + add_check(module, check) + + +def remove(module): + ''' removes a service or a check ''' + service_id = module.params.get('service_id') or module.params.get('service_name') + check_id = module.params.get('check_id') or module.params.get('check_name') + if not (service_id or check_id): + module.fail_json(msg='services and checks are removed by id or name.'\ + ' please supply a service id/name or a check id/name') + if service_id: + remove_service(module, service_id) + else: + remove_check(module, check_id) + + +def add_check(module, check): + ''' registers a check with the given agent. currently there is no way + retrieve the full metadata of an existing check through the consul api. + Without this we can't compare to the supplied check and so we must assume + a change. ''' + if not check.name: + module.fail_json(msg='a check name is required for a node level check,'\ + ' one not attached to a service') + + consul_api = get_consul_api(module) + check.register(consul_api) + + module.exit_json(changed=True, + check_id=check.check_id, + check_name=check.name, + script=check.script, + interval=check.interval, + ttl=check.ttl) + + +def remove_check(module, check_id): + ''' removes a check using its id ''' + consul_api = get_consul_api(module) + + if check_id in consul_api.agent.checks(): + consul_api.agent.check.deregister(check_id) + module.exit_json(changed=True, id=check_id) + + module.exit_json(changed=False, id=check_id) + + +def add_service(module, service): + ''' registers a service with the the current agent ''' + result = service + changed = False + + consul_api = get_consul_api(module) + existing = get_service_by_id(consul_api, service.id) + + # there is no way to retreive the details of checks so if a check is present + # in the service it must be reregistered + if service.has_checks() or not(existing or existing == service): + + service.register(consul_api) + # check that it registered correctly + registered = get_service_by_id(consul_api, service.id) + if registered: + result = registered + changed = True + + module.exit_json(changed=changed, + service_id=result.id, + service_name=result.name, + service_port=result.port, + checks=map(lambda x: x.to_dict(), service.checks), + tags=result.tags) + + +def remove_service(module, service_id): + ''' deregister a service from the given agent using its service id ''' + consul_api = get_consul_api(module) + service = get_service_by_id(consul_api, service_id) + if service: + consul_api.agent.service.deregister(service_id) + module.exit_json(changed=True, id=service_id) + + module.exit_json(changed=False, id=service_id) + + +def get_consul_api(module, token=None): + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port'), + token=module.params.get('token')) + + +def get_service_by_id(consul_api, service_id): + ''' iterate the registered services and find one with the given id ''' + for name, service in 
consul_api.agent.services().iteritems(): + if service['ID'] == service_id: + return ConsulService(loaded=service) + + +def parse_check(module): + + if module.params.get('script') and module.params.get('ttl'): + module.fail_json( + msg='check are either script or ttl driven, supplying both does'\ + ' not make sense') + + if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl'): + + return ConsulCheck( + module.params.get('check_id'), + module.params.get('check_name'), + module.params.get('check_node'), + module.params.get('check_host'), + module.params.get('script'), + module.params.get('interval'), + module.params.get('ttl'), + module.params.get('notes') + ) + + +def parse_service(module): + + if module.params.get('service_name') and module.params.get('service_port'): + return ConsulService( + module.params.get('service_id'), + module.params.get('service_name'), + module.params.get('service_port'), + module.params.get('tags'), + ) + elif module.params.get('service_name') and not module.params.get('service_port'): + + module.fail_json( + msg="service_name supplied but no service_port, a port is required"\ + " to configure a service. Did you configure the 'port' "\ + "argument meaning 'service_port'?") + + +class ConsulService(): + + def __init__(self, service_id=None, name=None, port=-1, + tags=None, loaded=None): + self.id = self.name = name + if service_id: + self.id = service_id + self.port = port + self.tags = tags + self.checks = [] + if loaded: + self.id = loaded['ID'] + self.name = loaded['Service'] + self.port = loaded['Port'] + self.tags = loaded['Tags'] + + def register(self, consul_api): + if len(self.checks) > 0: + check = self.checks[0] + consul_api.agent.service.register( + self.name, + service_id=self.id, + port=self.port, + tags=self.tags, + script=check.script, + interval=check.interval, + ttl=check.ttl) + else: + consul_api.agent.service.register( + self.name, + service_id=self.id, + port=self.port, + tags=self.tags) + + def add_check(self, check): + self.checks.append(check) + + def checks(self): + return self.checks + + def has_checks(self): + return len(self.checks) > 0 + + def __eq__(self, other): + return (isinstance(other, self.__class__) + and self.id == other.id + and self.name == other.name + and self.port == other.port + and self.tags == other.tags) + + def __ne__(self, other): + return not self.__eq__(other) + + def to_dict(self): + data = {'id': self.id, "name": self.name} + if self.port: + data['port'] = self.port + if self.tags and len(self.tags) > 0: + data['tags'] = self.tags + if len(self.checks) > 0: + data['check'] = self.checks[0].to_dict() + return data + + +class ConsulCheck(): + + def __init__(self, check_id, name, node=None, host='localhost', + script=None, interval=None, ttl=None, notes=None): + self.check_id = self.name = name + if check_id: + self.check_id = check_id + self.script = script + self.interval = str(interval) + + if not self.interval.endswith('m') or self.interval.endswith('s'): + self.interval += 'm' + + self.ttl = ttl + self.notes = notes + self.node = node + self.host = host + + if interval and interval <= 0: + raise Error('check interval must be positive') + + if ttl and ttl <= 0: + raise Error('check ttl value must be positive') + + def register(self, consul_api): + consul_api.agent.check.register(self.name, check_id=self.check_id, + script=self.script, + interval=self.interval, + ttl=self.ttl, notes=self.notes) + + def __eq__(self, other): + return (isinstance(other, self.__class__) + and 
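ConsulService.register above is a thin wrapper over python-consul's agent API. A minimal standalone sketch of the underlying call, assuming a local agent on the default port and reusing the nginx example from the module documentation:

import consul  # python-consul, the dependency noted in the commit message

def register_nginx(host='localhost', port=8500):
    c = consul.Consul(host=host, port=port)
    # Mirrors the no-check branch of ConsulService.register.
    c.agent.service.register('nginx', service_id='nginx', port=80,
                             tags=['prod', 'webservers'])
    return c.agent.services()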
self.check_id == other.check_id + and self.name == other.name + and self.script == script + and self.interval == interval) + + def __ne__(self, other): + return not self.__eq__(other) + + def to_dict(self): + data = {} + self._add(data, 'id', attr='check_id') + self._add(data, 'name', attr='check_name') + self._add(data, 'script') + self._add(data, 'node') + self._add(data, 'notes') + self._add(data, 'host') + self._add(data, 'interval') + self._add(data, 'ttl') + return data + + def _add(self, data, key, attr=None): + try: + if attr == None: + attr = key + data[key] = getattr(self, attr) + except: + pass + + +def main(): + module = AnsibleModule( + argument_spec=dict( + check_id=dict(required=False), + check_name=dict(required=False), + host=dict(default='localhost'), + interval=dict(required=False, default='1m'), + check_node=dict(required=False), + check_host=dict(required=False), + notes=dict(required=False), + port=dict(default=8500, type='int'), + script=dict(required=False), + service_id=dict(required=False), + service_name=dict(required=False), + service_port=dict(required=False, type='int'), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(required=False, type='list'), + token=dict(required=False), + url=dict(default='http://localhost:8500') + ), + supports_check_mode=False, + ) + try: + register_with_consul(module) + except IOError, e: + error = e.read() + if not error: + error = str(e) + module.fail_json(msg=error) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/clustering/consul_acl b/clustering/consul_acl new file mode 100644 index 00000000000..ae3efe5787f --- /dev/null +++ b/clustering/consul_acl @@ -0,0 +1,298 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: consul_acl +short_description: "manipulate consul acl keys and rules" +description: + - allows the addition, modification and deletion of ACL keys and associated + rules in a consul cluster via the agent. +version_added: "1.9" +author: Steve Gargan (steve.gargan@gmail.com) +options: + mgmt_token: + description: + - a management token is required to manipulate the acl lists + state: + description: + - whether the ACL pair should be present or absent, defaults to present + required: false + choices: ['present', 'absent'] + type: + description: + - the type of token that should be created, either management or + client, defaults to client + choices: ['client', 'management'] + name: + description: + - the name that should be associated with the acl key, this is opaque + to Consul + required: false + token: + description: + - the token key indentifying an ACL rule set. If generated by consul + this will be a UUID. + required: false + rules: + description: + - an list of the rules that should be associated with a given key/token. 
+ required: false +""" + +EXAMPLES = ''' + - name: create an acl token with rules + consul_acl: + mgmt_token: 'some_management_acl' + host: 'consul1.mycluster.io' + name: 'Foo access' + rules: + - key: 'foo' + policy: read + - key: 'private/foo' + policy: deny + + - name: remove a token + consul_acl: + mgmt_token: 'some_management_acl' + host: 'consul1.mycluster.io' + token: '172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e' + state: absent +''' + +import sys +import urllib2 + +try: + import consul +except ImportError, e: + print "failed=True msg='python-consul required for this module. "\ + "see http://python-consul.readthedocs.org/en/latest/#installation'" + sys.exit(1) + +try: + import hcl +except ImportError: + print "failed=True msg='pyhcl required for this module."\ + " see https://pypi.python.org/pypi/pyhcl'" + sys.exit(1) + +import epdb + + +def execute(module): + + state = module.params.get('state') + + if state == 'present': + update_acl(module) + else: + remove_acl(module) + + +def update_acl(module): + + rules = module.params.get('rules') + state = module.params.get('state') + token = module.params.get('token') + token_type = module.params.get('token_type') + mgmt = module.params.get('mgmt_token') + name = module.params.get('name') + consul = get_consul_api(module, mgmt) + changed = False + + try: + + if token: + existing_rules = load_rules_for_token(module, consul, token) + supplied_rules = yml_to_rules(module, rules) + print existing_rules + print supplied_rules + changed = not existing_rules == supplied_rules + if changed: + y = supplied_rules.to_hcl() + token = consul.acl.update( + token, + name=name, + type=token_type, + rules=supplied_rules.to_hcl()) + else: + try: + rules = yml_to_rules(module, rules) + if rules.are_rules(): + rules = rules.to_json() + else: + rules = None + + token = consul.acl.create( + name=name, type=token_type, rules=rules) + changed = True + except Exception, e: + module.fail_json( + msg="No token returned, check your managment key and that \ + the host is in the acl datacenter %s" % e) + except Exception, e: + module.fail_json(msg="Could not create/update acl %s" % e) + + module.exit_json(changed=changed, + token=token, + rules=rules, + name=name, + type=token_type) + + +def remove_acl(module): + state = module.params.get('state') + token = module.params.get('token') + mgmt = module.params.get('mgmt_token') + + consul = get_consul_api(module, token=mgmt) + changed = token and consul.acl.info(token) + if changed: + token = consul.acl.destroy(token) + + module.exit_json(changed=changed, token=token) + + +def load_rules_for_token(module, consul_api, token): + try: + rules = Rules() + info = consul_api.acl.info(token) + if info and info['Rules']: + rule_set = to_ascii(info['Rules']) + for rule in hcl.loads(rule_set).values(): + for key, policy in rule.iteritems(): + rules.add_rule(Rule(key, policy['policy'])) + return rules + except Exception, e: + module.fail_json( + msg="Could not load rule list from retrieved rule data %s, %s" % ( + token, e)) + + return json_to_rules(module, loaded) + +def to_ascii(unicode_string): + if isinstance(unicode_string, unicode): + return unicode_string.encode('ascii', 'ignore') + return unicode_string + +def yml_to_rules(module, yml_rules): + rules = Rules() + if yml_rules: + for rule in yml_rules: + if not('key' in rule or 'policy' in rule): + module.fail_json(msg="a rule requires a key and a policy.") + rules.add_rule(Rule(rule['key'], rule['policy'])) + return rules + +template = '''key "%s" { + policy = "%s" +}''' + +class 
Rules: + + def __init__(self): + self.rules = {} + + def add_rule(self, rule): + self.rules[rule.key] = rule + + def are_rules(self): + return len(self.rules) > 0 + + def to_json(self): + # import epdb; epdb.serve() + rules = {} + for key, rule in self.rules.iteritems(): + rules[key] = {'policy': rule.policy} + return json.dumps({'keys': rules}) + + def to_hcl(self): + + rules = "" + for key, rule in self.rules.iteritems(): + rules += template % (key, rule.policy) + + return to_ascii(rules) + + def __eq__(self, other): + if not (other or isinstance(other, self.__class__) + or len(other.rules) == len(self.rules)): + return False + + for name, other_rule in other.rules.iteritems(): + if not name in self.rules: + return False + rule = self.rules[name] + + if not (rule and rule == other_rule): + return False + return True + + def __str__(self): + return self.to_hcl() + +class Rule: + + def __init__(self, key, policy): + self.key = key + self.policy = policy + + def __eq__(self, other): + return (isinstance(other, self.__class__) + and self.key == other.key + and self.policy == other.policy) + def __hash__(self): + return hash(self.key) ^ hash(self.policy) + + def __str__(self): + return '%s %s' % (self.key, self.policy) +def get_consul_api(module, token=None): + if not token: + token = token = module.params.get('token') + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port'), + token=token) + + +def main(): + argument_spec = dict( + mgmt_token=dict(required=True), + host=dict(default='localhost'), + name=dict(required=False), + port=dict(default=8500, type='int'), + rules=dict(default=None, required=False, type='list'), + state=dict(default='present', choices=['present', 'absent']), + token=dict(required=False), + token_type=dict( + required=False, choices=['client', 'management'], default='client') + ) + + module = AnsibleModule(argument_spec, supports_check_mode=True) + + try: + execute(module) + except IOError, e: + error = e.read() + if not error: + error = str(e) + module.fail_json(msg=error) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/clustering/consul_kv b/clustering/consul_kv new file mode 100644 index 00000000000..6a2b77ea7c6 --- /dev/null +++ b/clustering/consul_kv @@ -0,0 +1,238 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: consul_kv +short_description: "manipulate entries in the key/value store of a consul + cluster. See http://www.consul.io/docs/agent/http.html#kv for more details." +description: + - allows the addition, modification and deletion of key/value entries in a + consul cluster via the agent. The entire contents of the record, including + the indices, flags and session are returned as 'value'. If the key + represents a prefix then Note that when a value is removed, the existing + value if any is returned as part of the results. 
+version_added: "1.9" +author: Steve Gargan (steve.gargan@gmail.com) +options: + state: + description: + - the action to take with the supplied key and value. If the state is + 'present', the key contents will be set to the value supplied, + 'changed' will be set to true only if the value was different to the + current contents. The state 'absent' will remove the key/value pair, + again 'changed' will be set to true only if the key actually existed + prior to the removal. An attempt can be made to obtain or free the + lock associated with a key/value pair with the states 'acquire' or + 'release' respectively. a valid session must be supplied to make the + attempt changed will be true if the attempt is successful, false + otherwise. + required: true + choices: ['present', 'absent', 'acquire', 'release'] + key: + description: + - the key at which the value should be stored. + required: true + value: + description: + - the value should be associated with the given key, required if state + is present + required: true + recurse: + description: + - if the key represents a prefix, each entry with the prefix can be + retrieved by setting this to true. + required: true + session: + description: + - the session that should be used to acquire or release a lock + associated with a key/value pair + token: + description: + - the token key indentifying an ACL rule set that controls access to + the key value pair + required: false + url: + description: + - location of the consul agent with which access the keay/value store, + defaults to http://localhost:8500 + required: false + cas: + description: + - used when acquiring a lock with a session. If the cas is 0, then + Consul will only put the key if it does not already exist. If the + cas value is non-zero, then the key is only set if the index matches + the ModifyIndex of that key. + flags: + description: + - opaque integer value that can be passed when setting a value. +""" + + +EXAMPLES = ''' + + - name: add or update the value associated with a key in the key/value store + consul_kv: + key: somekey + value: somevalue + + - name: remove a key from the store + consul_kv: + key: somekey + state: absent + + - name: add a node to an arbitrary group via consul inventory (see consul.ini) + consul_kv: + key: ansible/groups/dc1/somenode + value: 'top_secret' +''' + +import sys +import urllib2 + +try: + import json +except ImportError: + import simplejson as json + +try: + import consul +except ImportError, e: + print """failed=True msg='python-consul required for this module. 
\ + see http://python-consul.readthedocs.org/en/latest/#installation'""" + sys.exit(1) + + +def execute(module): + + state = module.params.get('state') + + if state == 'acquire' or state == 'release': + lock(module, state) + if state == 'present': + add_value(module) + else: + remove_value(module) + + +def lock(module, state): + + session = module.params.get('session') + key = module.params.get('key') + value = module.params.get('value') + + if not session: + module.fail( + msg='%s of lock for %s requested but no session supplied' % + (state, key)) + + if state == 'acquire': + successful = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + acquire=session, + flags=module.params.get('flags')) + else: + successful = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + release=session, + flags=module.params.get('flags')) + + module.exit_json(changed=successful, + index=index, + key=key) + + +def add_value(module): + + consul_api = get_consul_api(module) + + key = module.params.get('key') + value = module.params.get('value') + + index, existing = consul_api.kv.get(key) + + changed = not existing or (existing and existing['Value'] != value) + if changed and not module.check_mode: + changed = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + flags=module.params.get('flags')) + + if module.params.get('retrieve'): + index, stored = consul_api.kv.get(key) + + module.exit_json(changed=changed, + index=index, + key=key, + data=stored) + + +def remove_value(module): + ''' remove the value associated with the given key. if the recurse parameter + is set then any key prefixed with the given key will be removed. ''' + consul_api = get_consul_api(module) + + key = module.params.get('key') + value = module.params.get('value') + + index, existing = consul_api.kv.get( + key, recurse=module.params.get('recurse')) + + changed = existing != None + if changed and not module.check_mode: + consul_api.kv.delete(key, module.params.get('recurse')) + + module.exit_json(changed=changed, + index=index, + key=key, + data=existing) + + +def get_consul_api(module, token=None): + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port'), + token=module.params.get('token')) + + +def main(): + + argument_spec = dict( + cas=dict(required=False), + flags=dict(required=False), + host=dict(default='localhost'), + key=dict(required=True), + port=dict(default=8500, type='int'), + recurse=dict(required=False, type='bool'), + retrieve=dict(required=False, default=True), + state=dict(default='present', choices=['present', 'absent']), + token=dict(required=False, default='anonymous'), + value=dict(required=False) + ) + + module = AnsibleModule(argument_spec, supports_check_mode=True) + + try: + execute(module) + except IOError, e: + error = e.read() + if not error: + error = str(e) + module.fail_json(msg=error) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/clustering/consul_session b/clustering/consul_session new file mode 100644 index 00000000000..f11c5447e57 --- /dev/null +++ b/clustering/consul_session @@ -0,0 +1,213 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: consul_session +short_description: "manipulate consul sessions" +description: + - allows the addition, modification and deletion of sessions in a consul + cluster. These sessions can then be used in conjunction with key value pairs + to implement distributed locks. In depth documentation for working with + sessions can be found here http://www.consul.io/docs/internals/sessions.html +version_added: "1.9" +author: Steve Gargan (steve.gargan@gmail.com) +options: + state: + description: + - whether the session should be present i.e. created if it doesn't + exist, or absent, removed if present. If created, the ID for the + session is returned in the output. If absent, the name or ID is + required to remove the session. Info for a single session, all the + sessions for a node or all available sessions can be retrieved by + specifying info, node or list for the state; for node or info, the + node name or session id is required as parameter. + required: false + choices: ['present', 'absent', 'info', 'node', 'list'] + name: + description: + - the name that should be associated with the session. This is opaque + to Consul and not required. + required: false + delay: + description: + - the optional lock delay that can be attached to the session when it + is created. Locks for invalidated sessions ar blocked from being + acquired until this delay has expired. + default: 15s + node: + description: + - the name of the node that with which the session will be associated. + by default this is the name of the agent. + datacenter: + description: + - name of the datacenter in which the session exists or should be + created. + checks: + description: + - a list of checks that will be used to verify the session health. If + all the checks fail, the session will be invalidated and any locks + associated with the session will be release and can be acquired once + the associated lock delay has expired. +""" + +EXAMPLES = ''' + +''' + +import sys +import urllib2 + +try: + import consul +except ImportError, e: + print "failed=True msg='python-consul required for this module. 
see "\ + "http://python-consul.readthedocs.org/en/latest/#installation'" + sys.exit(1) + + +def execute(module): + + state = module.params.get('state') + + if state in ['info', 'list', 'node']: + lookup_sessions(module) + elif state == 'present': + update_session(module) + else: + remove_session(module) + +def lookup_sessions(module): + + datacenter = module.params.get('datacenter') + + state = module.params.get('state') + consul = get_consul_api(module) + try: + if state == 'list': + sessions_list = consul.session.list(dc=datacenter) + #ditch the index, this can be grabbed from the results + if sessions_list and sessions_list[1]: + sessions_list = sessions_list[1] + module.exit_json(changed=True, + sessions=sessions_list) + elif state == 'node': + node = module.params.get('node') + if not node: + module.fail_json( + msg="node name is required to retrieve sessions for node") + sessions = consul.session.node(node, dc=datacenter) + module.exit_json(changed=True, + node=node, + sessions=sessions) + elif state == 'info': + session_id = module.params.get('id') + if not session_id: + module.fail_json( + msg="session_id is required to retrieve indvidual session info") + + session_by_id = consul.session.info(session_id, dc=datacenter) + module.exit_json(changed=True, + session_id=session_id, + sessions=session_by_id) + + except Exception, e: + module.fail_json(msg="Could not retrieve session info %s" % e) + + +def update_session(module): + + name = module.params.get('name') + session_id = module.params.get('id') + delay = module.params.get('delay') + checks = module.params.get('checks') + datacenter = module.params.get('datacenter') + node = module.params.get('node') + + consul = get_consul_api(module) + changed = True + + try: + + session = consul.session.create( + name=name, + node=node, + lock_delay=delay, + dc=datacenter, + checks=checks + ) + module.exit_json(changed=True, + session_id=session, + name=name, + delay=delay, + checks=checks, + node=node) + except Exception, e: + module.fail_json(msg="Could not create/update session %s" % e) + + +def remove_session(module): + session_id = module.params.get('id') + + if not session_id: + module.fail_json(msg="""A session id must be supplied in order to + remove a session.""") + + consul = get_consul_api(module) + changed = False + + try: + session = consul.session.destroy(session_id) + + module.exit_json(changed=True, + session_id=session_id) + except Exception, e: + module.fail_json(msg="Could not remove session with id '%s' %s" % ( + session_id, e)) + + +def get_consul_api(module): + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port')) + +def main(): + argument_spec = dict( + checks=dict(default=None, required=False, type='list'), + delay=dict(required=False,type='int', default=15), + host=dict(default='localhost'), + port=dict(default=8500, type='int'), + id=dict(required=False), + name=dict(required=False), + node=dict(required=False), + state=dict(default='present', + choices=['present', 'absent', 'info', 'node', 'list']) + ) + + module = AnsibleModule(argument_spec, supports_check_mode=True) + + try: + execute(module) + except IOError, e: + error = e.read() + if not error: + error = str(e) + module.fail_json(msg=error) + +# import module snippets +from ansible.module_utils.basic import * +main() From c484d32a40b582c00cafef9f20d136188a1b24c2 Mon Sep 17 00:00:00 2001 From: Willy Barro Date: Sat, 24 Jan 2015 18:35:22 -0200 Subject: [PATCH 038/720] Add pushbullet module --- notification/pushbullet.py | 174 
+++++++++++++++++++++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 notification/pushbullet.py diff --git a/notification/pushbullet.py b/notification/pushbullet.py new file mode 100644 index 00000000000..d89c79ec941 --- /dev/null +++ b/notification/pushbullet.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +author: Willy Barro +requirements: [ pushbullet.py ] +module: pushbullet +short_description: Sends notifications to Pushbullet +description: + - This module sends push notifications via Pushbullet to channels or devices. +version_added: "1.8" +options: + api_key: + description: + - Push bullet API token + required: true + channel: + description: + - The channel TAG you wish to broadcast a push notification, + as seen on the "My Channels" > "Edit your channel" at + Pushbullet page. + required: false + default: null + device: + description: + - The device NAME you wish to send a push notification, + as seen on the Pushbullet main page. + required: false + default: null + push_type: + description: + - Thing you wish to push. + required: false + default: note + choices: [ "note", "link" ] + title: + description: + - Title of the notification. + required: true + body: + description: + - Body of the notification, e.g. Details of the fault you're alerting. + required: false + +notes: + - Requires pushbullet.py Python package on the remote host. + You can install it via pip with ($ pip install pushbullet.py). + See U(https://github.com/randomchars/pushbullet.py) +''' + +EXAMPLES = ''' +# Sends a push notification to a device +- pushbullet: + api_key: "ABC123abc123ABC123abc123ABC123ab" + device: "Chrome" + title: "You may see this on Google Chrome" + +# Sends a link to a device +- pushbullet: + api_key: "ABC123abc123ABC123abc123ABC123ab" + device: "Chrome" + push_type: "link" + title: "Ansible Documentation" + body: "http://docs.ansible.com/" + +# Sends a push notification to a channel +- pushbullet: + api_key: "ABC123abc123ABC123abc123ABC123ab" + channel: "my-awesome-channel" + title: "Broadcasting a message to the #my-awesome-channel folks" + +# Sends a push notification with title and body to a channel +- pushbullet: + api_key: "ABC123abc123ABC123abc123ABC123ab" + channel: "my-awesome-channel" + title: "ALERT! 
Signup service is down" + body: "Error rate on signup service is over 90% for more than 2 minutes" +''' + +try: + from pushbullet import PushBullet +except ImportError: + pushbullet_found = False +else: + pushbullet_found = True + +# =========================================== +# Main +# + +def main(): + module = AnsibleModule( + argument_spec = dict( + api_key = dict(type='str', required=True), + channel = dict(type='str', default=None), + device = dict(type='str', default=None), + push_type = dict(type='str', default="note", choices=['note', 'link']), + title = dict(type='str', required=True), + body = dict(type='str', default=None) + ), + supports_check_mode=True + ) + + api_key = module.params['api_key'] + channel = module.params['channel'] + device = module.params['device'] + push_type = module.params['push_type'] + title = module.params['title'] + body = module.params['body'] + + if not pushbullet_found: + module.fail_json(msg="Python 'pushbullet.py' module is required. Install via: $ pip install pushbullet.py") + + # Init pushbullet + pb = PushBullet(api_key) + target = None + + # Checks for channel/device + if device is not None and channel is not None: + module.fail_json(msg="You can't use both device and channel at the same time.") + + if device is None and channel is None: + module.fail_json(msg="You need to provide a channel or a device.") + + # Search for given device + if device is not None: + devices_by_nickname = {d.nickname: d for d in pb.devices} + + if device in devices_by_nickname: + target = devices_by_nickname[device] + else: + module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys()))) + + # Search for given channel + if channel is not None: + channels_by_tag = {c.channel_tag: c for c in pb.channels} + + if channel in channels_by_tag: + target = channels_by_tag[channel] + else: + module.fail_json(msg="Channel '%s' not found. 
Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys()))) + + # If in check mode, exit saying that we succeeded + if module.check_mode: + module.exit_json(changed=False) + + # Send push notification + success, result = target.push_note(title, body) + + if success: + module.exit_json(changed=True, msg="OK") + + # General failure + module.fail_json(msg="Some error ocurred, Pushbullet response: %s" % (result)) + +# import module snippets +from ansible.module_utils.basic import * +main() From 23495a16f4f16982ef30f2994c9e405c9276bc78 Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Thu, 29 Jan 2015 10:32:09 +0100 Subject: [PATCH 039/720] fixed tab/space mix --- packaging/os/zypper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index c848e86fcc6..11f0380e81a 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -141,7 +141,7 @@ def get_package_state(m, packages): installed_state[package] = False for package in packages: - if package not in installed_state: + if package not in installed_state: print package + ' was not returned by rpm \n' return None From e1008059ecf9ac94902501b5942f5fd07278286b Mon Sep 17 00:00:00 2001 From: Julien Pepy Date: Mon, 18 Aug 2014 09:28:32 +0200 Subject: [PATCH 040/720] Clean options building in Composer module --- packaging/language/composer.py | 38 +++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/packaging/language/composer.py b/packaging/language/composer.py index f788f53dd5c..86863909be8 100644 --- a/packaging/language/composer.py +++ b/packaging/language/composer.py @@ -128,30 +128,34 @@ def main(): supports_check_mode=True ) - module.params["working_dir"] = os.path.abspath(module.params["working_dir"]) + options = [] - options = set([]) # Default options - options.add("--no-ansi") - options.add("--no-progress") - options.add("--no-interaction") + options.append('--no-ansi') + options.append('--no-progress') + options.append('--no-interaction') - if module.check_mode: - options.add("--dry-run") - del module.params['CHECKMODE'] + options.extend(['--working-dir', os.path.abspath(module.params['working_dir'])]) - # Get composer command with fallback to default + # Get composer command with fallback to default command = module.params['command'] - del module.params['command']; # Prepare options - for i in module.params: - opt = "--%s" % i.replace("_","-") - p = module.params[i] - if isinstance(p, (bool)) and p: - options.add(opt) - elif isinstance(p, (str)): - options.add("%s=%s" % (opt, p)) + if module.params['prefer_source']: + options.append('--prefer-source') + if module.params['prefer_dist']: + options.append('--prefer-dist') + if module.params['no_dev']: + options.append('--no-dev') + if module.params['no_scripts']: + options.append('--no-scripts') + if module.params['no_plugins']: + options.append('--no-plugins') + if module.params['optimize_autoloader']: + options.append('--optimize-autoloader') + + if module.check_mode: + options.append('--dry-run') rc, out, err = composer_install(module, command, options) From 46f53724f0005411a6e2526aaef2ada3fc6d6af9 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Fri, 13 Feb 2015 13:37:16 -0500 Subject: [PATCH 041/720] Restore rax_mon_* modules. 
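The five restored modules wrap pyrax's cloud_monitoring client and are meant to be chained, entity -> check -> notification -> notification_plan -> alarm, with each task registering the id consumed by the next. The sketch below strings together the same pyrax calls the modules use, in one plain script; it is illustrative and untested, the credential file path, labels, address and criteria are placeholder values, and the pyrax.set_setting/set_credential_file authentication step is an assumption about client setup rather than something taken from these patches.

    import pyrax

    # Assumed authentication setup; the modules receive credentials through
    # the shared rax argument spec instead.
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_credential_file('~/.rax_pub')
    cm = pyrax.cloud_monitoring

    # entity -> check -> notification -> notification_plan -> alarm
    entity = cm.create_entity(label='web01',
                              ip_addresses={'main': '192.0.2.10'})
    check = cm.create_check(entity, label='ping', check_type='remote.ping',
                            target_alias='main',
                            monitoring_zones_poll=['mzdfw', 'mzord'],
                            details={'count': 10}, period=60, timeout=30)
    email = cm.create_notification('email', label='oncall',
                                   details={'address': 'oncall@example.com'})
    plan = cm.create_notification_plan(label='defcon1',
                                       critical_state=[email.id],
                                       ok_state=[email.id])
    alarm = cm.create_alarm(entity=entity.id, check=check.id,
                            notification_plan=plan.id, label='ping-warning',
                            criteria="if (rate(metric['average']) > 10) {"
                                     " return new AlarmStatus(WARNING); }"
                                     " return new AlarmStatus(OK);")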
--- cloud/rackspace/rax_mon_alarm.py | 240 ++++++++++++++ cloud/rackspace/rax_mon_check.py | 323 +++++++++++++++++++ cloud/rackspace/rax_mon_entity.py | 196 +++++++++++ cloud/rackspace/rax_mon_notification.py | 187 +++++++++++ cloud/rackspace/rax_mon_notification_plan.py | 186 +++++++++++ 5 files changed, 1132 insertions(+) create mode 100644 cloud/rackspace/rax_mon_alarm.py create mode 100644 cloud/rackspace/rax_mon_check.py create mode 100644 cloud/rackspace/rax_mon_entity.py create mode 100644 cloud/rackspace/rax_mon_notification.py create mode 100644 cloud/rackspace/rax_mon_notification_plan.py diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py new file mode 100644 index 00000000000..f5fc9593abd --- /dev/null +++ b/cloud/rackspace/rax_mon_alarm.py @@ -0,0 +1,240 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_alarm +short_description: Create or delete a Rackspace Cloud Monitoring alarm. +description: +- Create or delete a Rackspace Cloud Monitoring alarm that associates an + existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with + criteria that specify what conditions will trigger which levels of + notifications. Rackspace monitoring module flow | rax_mon_entity -> + rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> + *rax_mon_alarm* +version_added: "1.8.2" +options: + state: + description: + - Ensure that the alarm with this C(label) exists or does not exist. + choices: [ "present", "absent" ] + required: false + default: present + label: + description: + - Friendly name for this alarm, used to achieve idempotence. Must be a String + between 1 and 255 characters long. + required: true + entity_id: + description: + - ID of the entity this alarm is attached to. May be acquired by registering + the value of a rax_mon_entity task. + required: true + check_id: + description: + - ID of the check that should be alerted on. May be acquired by registering + the value of a rax_mon_check task. + required: true + notification_plan_id: + description: + - ID of the notification plan to trigger if this alarm fires. May be acquired + by registering the value of a rax_mon_notification_plan task. + required: true + criteria: + description: + - Alarm DSL that describes alerting conditions and their output states. Must + be between 1 and 16384 characters long. See + http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html + for a reference on the alerting language. + disabled: + description: + - If yes, create this alarm, but leave it in an inactive state. Defaults to + no. + choices: [ "yes", "no" ] + metadata: + description: + - Arbitrary key/value pairs to accompany the alarm. 
Must be a hash of String + keys and values between 1 and 255 characters long. +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Alarm example + gather_facts: False + hosts: local + connection: local + tasks: + - name: Ensure that a specific alarm exists. + rax_mon_alarm: + credentials: ~/.rax_pub + state: present + label: uhoh + entity_id: "{{ the_entity['entity']['id'] }}" + check_id: "{{ the_check['check']['id'] }}" + notification_plan_id: "{{ defcon1['notification_plan']['id'] }}" + criteria: > + if (rate(metric['average']) > 10) { + return new AlarmStatus(WARNING); + } + return new AlarmStatus(OK); + register: the_alarm +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria, + disabled, metadata): + + # Verify the presence of required attributes. + + required_attrs = { + "label": label, "entity_id": entity_id, "check_id": check_id, + "notification_plan_id": notification_plan_id + } + + for (key, value) in required_attrs.iteritems(): + if not value: + module.fail_json(msg=('%s is required for rax_mon_alarm' % key)) + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + if criteria and len(criteria) < 1 or len(criteria) > 16384: + module.fail_json(msg='criteria must be between 1 and 16384 characters long') + + # Coerce attributes. + + changed = False + alarm = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [a for a in cm.list_alarms(entity_id) if a.label == label] + + if existing: + alarm = existing[0] + + if state == 'present': + should_create = False + should_update = False + should_delete = False + + if len(existing) > 1: + module.fail_json(msg='%s existing alarms have the label %s.' 
% + (len(existing), label)) + + if alarm: + if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id: + should_delete = should_create = True + + should_update = (disabled and disabled != alarm.disabled) or \ + (metadata and metadata != alarm.metadata) or \ + (criteria and criteria != alarm.criteria) + + if should_update and not should_delete: + cm.update_alarm(entity=entity_id, alarm=alarm, + criteria=criteria, disabled=disabled, + label=label, metadata=metadata) + changed = True + + if should_delete: + alarm.delete() + changed = True + else: + should_create = True + + if should_create: + alarm = cm.create_alarm(entity=entity_id, check=check_id, + notification_plan=notification_plan_id, + criteria=criteria, disabled=disabled, label=label, + metadata=metadata) + changed = True + elif state == 'absent': + for a in existing: + a.delete() + changed = True + else: + module.fail_json(msg='state must be either present or absent.') + + if alarm: + alarm_dict = { + "id": alarm.id, + "label": alarm.label, + "check_id": alarm.check_id, + "notification_plan_id": alarm.notification_plan_id, + "criteria": alarm.criteria, + "disabled": alarm.disabled, + "metadata": alarm.metadata + } + module.exit_json(changed=changed, alarm=alarm_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present'), + label=dict(), + entity_id=dict(), + check_id=dict(), + notification_plan_id=dict(), + criteria=dict(), + disabled=dict(type='bool', default=False), + metadata=dict(type='dict') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + label = module.params.get('label') + entity_id = module.params.get('entity_id') + check_id = module.params.get('check_id') + notification_plan_id = module.params.get('notification_plan_id') + criteria = module.params.get('criteria') + disabled = module.boolean(module.params.get('disabled')) + metadata = module.params.get('metadata') + + setup_rax_module(module, pyrax) + + alarm(module, state, label, entity_id, check_id, notification_plan_id, + criteria, disabled, metadata) + + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. +main() diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py new file mode 100644 index 00000000000..9da283c3ba0 --- /dev/null +++ b/cloud/rackspace/rax_mon_check.py @@ -0,0 +1,323 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
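Editor's note, not part of the patch: the DOCUMENTATION that follows describes several parameter combinations (remote.* checks need monitoring zones and a target, agent.* checks must not have them, and timeout must stay below period) that the module code further down does not enforce itself; it leaves them to the Rackspace API. A minimal standalone validator expressing those documented rules, with an invented function name, might look like this:

    def validate_check_params(check_type, monitoring_zones_poll,
                              target_hostname, target_alias, period, timeout):
        """Return a list of problems with the documented parameter rules."""
        errors = []
        if check_type.startswith('remote.'):
            if not monitoring_zones_poll:
                errors.append('remote.* checks require monitoring_zones_poll')
            if not (target_hostname or target_alias):
                errors.append('remote.* checks require target_hostname or target_alias')
        elif check_type.startswith('agent.'):
            if monitoring_zones_poll or target_hostname or target_alias:
                errors.append('agent.* checks take no remote-only parameters')
        if period and timeout and timeout >= period:
            errors.append('timeout must be less than period')
        return errors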
+ +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_check +short_description: Create or delete a Rackspace Cloud Monitoring check for an + existing entity. +description: +- Create or delete a Rackspace Cloud Monitoring check associated with an + existing rax_mon_entity. A check is a specific test or measurement that is + performed, possibly from different monitoring zones, on the systems you + monitor. Rackspace monitoring module flow | rax_mon_entity -> + *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> + rax_mon_alarm +version_added: "1.8.2" +options: + state: + description: + - Ensure that a check with this C(label) exists or does not exist. + choices: ["present", "absent"] + entity_id: + description: + - ID of the rax_mon_entity to target with this check. + required: true + label: + description: + - Defines a label for this check, between 1 and 64 characters long. + required: true + check_type: + description: + - The type of check to create. C(remote.) checks may be created on any + rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities + that have a non-null C(agent_id). + choices: + - remote.dns + - remote.ftp-banner + - remote.http + - remote.imap-banner + - remote.mssql-banner + - remote.mysql-banner + - remote.ping + - remote.pop3-banner + - remote.postgresql-banner + - remote.smtp-banner + - remote.smtp + - remote.ssh + - remote.tcp + - remote.telnet-banner + - agent.filesystem + - agent.memory + - agent.load_average + - agent.cpu + - agent.disk + - agent.network + - agent.plugin + required: true + monitoring_zones_poll: + description: + - Comma-separated list of the names of the monitoring zones the check should + run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon, + mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks. + target_hostname: + description: + - One of `target_hostname` and `target_alias` is required for remote.* checks, + but prohibited for agent.* checks. The hostname this check should target. + Must be a valid IPv4, IPv6, or FQDN. + target_alias: + description: + - One of `target_alias` and `target_hostname` is required for remote.* checks, + but prohibited for agent.* checks. Use the corresponding key in the entity's + `ip_addresses` hash to resolve an IP address to target. + details: + description: + - Additional details specific to the check type. Must be a hash of strings + between 1 and 255 characters long, or an array or object containing 0 to + 256 items. + disabled: + description: + - If "yes", ensure the check is created, but don't actually use it yet. + choices: [ "yes", "no" ] + metadata: + description: + - Hash of arbitrary key-value pairs to accompany this check if it fires. + Keys and values must be strings between 1 and 255 characters long. + period: + description: + - The number of seconds between each time the check is performed. Must be + greater than the minimum period set on your account. + timeout: + description: + - The number of seconds this check will wait when attempting to collect + results. Must be less than the period. +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Create a monitoring check + gather_facts: False + hosts: local + connection: local + tasks: + - name: Associate a check with an existing entity. 
+ rax_mon_check: + credentials: ~/.rax_pub + state: present + entity_id: "{{ the_entity['entity']['id'] }}" + label: the_check + check_type: remote.ping + monitoring_zones_poll: mziad,mzord,mzdfw + details: + count: 10 + meta: + hurf: durf + register: the_check +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def cloud_check(module, state, entity_id, label, check_type, + monitoring_zones_poll, target_hostname, target_alias, details, + disabled, metadata, period, timeout): + + # Verify the presence of required attributes. + + required_attrs = { + "entity_id": entity_id, "label": label, "check_type": check_type + } + + for (key, value) in required_attrs.iteritems(): + if not value: + module.fail_json(msg=('%s is required for rax_mon_check' % key)) + + # Coerce attributes. + + if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list): + monitoring_zones_poll = [monitoring_zones_poll] + + if period: + period = int(period) + + if timeout: + timeout = int(timeout) + + changed = False + check = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + entity = cm.get_entity(entity_id) + if not entity: + module.fail_json(msg='Failed to instantiate entity. "%s" may not be' + ' a valid entity id.' % entity_id) + + existing = [e for e in entity.list_checks() if e.label == label] + + if existing: + check = existing[0] + + if state == 'present': + if len(existing) > 1: + module.fail_json(msg='%s existing checks have a label of %s.' % + (len(existing), label)) + + should_delete = False + should_create = False + should_update = False + + if check: + # Details may include keys set to default values that are not + # included in the initial creation. + # + # Only force a recreation of the check if one of the *specified* + # keys is missing or has a different value. + if details: + for (key, value) in details.iteritems(): + if key not in check.details: + should_delete = should_create = True + elif value != check.details[key]: + should_delete = should_create = True + + should_update = label != check.label or \ + (target_hostname and target_hostname != check.target_hostname) or \ + (target_alias and target_alias != check.target_alias) or \ + (disabled != check.disabled) or \ + (metadata and metadata != check.metadata) or \ + (period and period != check.period) or \ + (timeout and timeout != check.timeout) or \ + (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll) + + if should_update and not should_delete: + check.update(label=label, + disabled=disabled, + metadata=metadata, + monitoring_zones_poll=monitoring_zones_poll, + timeout=timeout, + period=period, + target_alias=target_alias, + target_hostname=target_hostname) + changed = True + else: + # The check doesn't exist yet. 
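Editor's aside, not part of the patch: the comparison a few lines above only recreates a check when one of the keys the playbook actually specified is missing or different, so defaults the API adds on the server do not cause needless churn. Distilled into a standalone helper (names and sample data invented here), the rule is shown below; the patch itself then falls through to the should_create = True assignment on the next line.

    def needs_recreate(requested_details, current_details):
        """True when a requested detail key is absent or differs on the server."""
        for key, value in requested_details.iteritems():
            if key not in current_details or current_details[key] != value:
                return True
        return False

    # A check asked for with {'count': 10} is left alone even after the API adds
    # defaults, but changing the requested value forces a delete-and-recreate.
    assert not needs_recreate({'count': 10}, {'count': 10, 'timeout': 9})
    assert needs_recreate({'count': 5}, {'count': 10, 'timeout': 9})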
+ should_create = True + + if should_delete: + check.delete() + + if should_create: + check = cm.create_check(entity, + label=label, + check_type=check_type, + target_hostname=target_hostname, + target_alias=target_alias, + monitoring_zones_poll=monitoring_zones_poll, + details=details, + disabled=disabled, + metadata=metadata, + period=period, + timeout=timeout) + changed = True + elif state == 'absent': + if check: + check.delete() + changed = True + else: + module.fail_json(msg='state must be either present or absent.') + + if check: + check_dict = { + "id": check.id, + "label": check.label, + "type": check.type, + "target_hostname": check.target_hostname, + "target_alias": check.target_alias, + "monitoring_zones_poll": check.monitoring_zones_poll, + "details": check.details, + "disabled": check.disabled, + "metadata": check.metadata, + "period": check.period, + "timeout": check.timeout + } + module.exit_json(changed=changed, check=check_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + entity_id=dict(), + label=dict(), + check_type=dict(), + monitoring_zones_poll=dict(), + target_hostname=dict(), + target_alias=dict(), + details=dict(type='dict', default={}), + disabled=dict(type='bool', default=False), + metadata=dict(type='dict', default={}), + period=dict(type='int'), + timeout=dict(type='int'), + state=dict(default='present') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + entity_id = module.params.get('entity_id') + label = module.params.get('label') + check_type = module.params.get('check_type') + monitoring_zones_poll = module.params.get('monitoring_zones_poll') + target_hostname = module.params.get('target_hostname') + target_alias = module.params.get('target_alias') + details = module.params.get('details') + disabled = module.boolean(module.params.get('disabled')) + metadata = module.params.get('metadata') + period = module.params.get('period') + timeout = module.params.get('timeout') + + state = module.params.get('state') + + setup_rax_module(module, pyrax) + + cloud_check(module, state, entity_id, label, check_type, + monitoring_zones_poll, target_hostname, target_alias, details, + disabled, metadata, period, timeout) + + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. +main() diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py new file mode 100644 index 00000000000..8b95c291914 --- /dev/null +++ b/cloud/rackspace/rax_mon_entity.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
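Editor's note, not part of the patch: the named_ip_addresses hash documented below is what later rax_mon_check tasks reference through target_alias, so a check can point at a name such as 'web_box' instead of a literal address. A small sketch of that lookup, reusing the sample addresses from this module's EXAMPLES (the helper name is invented):

    def resolve_target(entity_ip_addresses, target_alias):
        """Map a check's target_alias onto the entity's named IP addresses."""
        try:
            return entity_ip_addresses[target_alias]
        except KeyError:
            raise ValueError('entity defines no IP address named %r' % target_alias)

    named_ip_addresses = {'web_box': '192.168.0.10', 'db_box': '192.168.0.11'}
    assert resolve_target(named_ip_addresses, 'db_box') == '192.168.0.11'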
+ +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_entity +short_description: Create or delete a Rackspace Cloud Monitoring entity +description: +- Create or delete a Rackspace Cloud Monitoring entity, which represents a device + to monitor. Entities associate checks and alarms with a target system and + provide a convenient, centralized place to store IP addresses. Rackspace + monitoring module flow | *rax_mon_entity* -> rax_mon_check -> + rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm +version_added: "1.8.2" +options: + label: + description: + - Defines a name for this entity. Must be a non-empty string between 1 and + 255 characters long. + required: true + state: + description: + - Ensure that an entity with this C(name) exists or does not exist. + choices: ["present", "absent"] + agent_id: + description: + - Rackspace monitoring agent on the target device to which this entity is + bound. Necessary to collect C(agent.) rax_mon_checks against this entity. + named_ip_addresses: + description: + - Hash of IP addresses that may be referenced by name by rax_mon_checks + added to this entity. Must be a dictionary of with keys that are names + between 1 and 64 characters long, and values that are valid IPv4 or IPv6 + addresses. + metadata: + description: + - Hash of arbitrary C(name), C(value) pairs that are passed to associated + rax_mon_alarms. Names and values must all be between 1 and 255 characters + long. +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Entity example + gather_facts: False + hosts: local + connection: local + tasks: + - name: Ensure an entity exists + rax_mon_entity: + credentials: ~/.rax_pub + state: present + label: my_entity + named_ip_addresses: + web_box: 192.168.0.10 + db_box: 192.168.0.11 + meta: + hurf: durf + register: the_entity +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, + metadata): + if not label: + module.fail_json(msg='label is required for rax_mon_entity') + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + changed = False + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for entity in cm.list_entities(): + if label == entity.label: + existing.append(entity) + + entity = None + + if existing: + entity = existing[0] + + if state == 'present': + should_update = False + should_delete = False + should_create = False + + if len(existing) > 1: + module.fail_json(msg='%s existing entities have the label %s.' % + (len(existing), label)) + + if entity: + if named_ip_addresses and named_ip_addresses != entity.ip_addresses: + should_delete = should_create = True + + # Change an existing Entity, unless there's nothing to do. + should_update = agent_id and agent_id != entity.agent_id or \ + (metadata and metadata != entity.metadata) + + if should_update and not should_delete: + entity.update(agent_id, metadata) + changed = True + + if should_delete: + entity.delete() + else: + should_create = True + + if should_create: + # Create a new Entity. 
+ entity = cm.create_entity(label=label, agent=agent_id, + ip_addresses=named_ip_addresses, + metadata=metadata) + changed = True + elif state == 'absent': + # Delete the existing Entities. + for e in existing: + e.delete() + changed = True + else: + module.fail_json(msg='state must be present or absent') + + if entity: + entity_dict = { + "id": entity.id, + "name": entity.name, + "agent_id": entity.agent_id, + } + module.exit_json(changed=changed, entity=entity_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present'), + label=dict(), + agent_id=dict(), + named_ip_addresses=dict(type='dict', default={}), + metadata=dict(type='dict', default={}) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + agent_id = module.params.get('agent_id') + named_ip_addresses = module.params.get('named_ip_addresses') + metadata = module.params.get('metadata') + + setup_rax_module(module, pyrax) + + cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata) + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. +main() diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py new file mode 100644 index 00000000000..74c4319255b --- /dev/null +++ b/cloud/rackspace/rax_mon_notification.py @@ -0,0 +1,187 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_notification +short_description: Create or delete a Rackspace Cloud Monitoring notification. +description: +- Create or delete a Rackspace Cloud Monitoring notification that specifies a + channel that can be used to communicate alarms, such as email, webhooks, or + PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> + *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm +version_added: "1.8.2" +options: + state: + description: + - Ensure that the notification with this C(label) exists or does not exist. + choices: ['present', 'absent'] + label: + description: + - Defines a friendly name for this notification. String between 1 and 255 + characters long. + required: true + notification_type: + description: + - A supported notification type. + choices: ["webhook", "email", "pagerduty"] + required: true + details: + description: + - Dictionary of key-value pairs used to initialize the notification. + Required keys and meanings vary with notification type. 
See + http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/ + service-notification-types-crud.html for details. + required: true +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Monitoring notification example + gather_facts: False + hosts: local + connection: local + tasks: + - name: Email me when something goes wrong. + rax_mon_entity: + credentials: ~/.rax_pub + label: omg + type: email + details: + address: me@mailhost.com + register: the_notification +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def notification(module, state, label, notification_type, details): + + if not label: + module.fail_json(msg='label is required for rax_mon_notification') + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + if not notification_type: + module.fail_json(msg='you must provide a notification_type') + + if not details: + module.fail_json(msg='notification details are required') + + changed = False + notification = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for n in cm.list_notifications(): + if n.label == label: + existing.append(n) + + if existing: + notification = existing[0] + + if state == 'present': + should_update = False + should_delete = False + should_create = False + + if len(existing) > 1: + module.fail_json(msg='%s existing notifications are labelled %s.' % + (len(existing), label)) + + if notification: + should_delete = (notification_type != notification.type) + + should_update = (details != notification.details) + + if should_update and not should_delete: + notification.update(details=notification.details) + changed = True + + if should_delete: + notification.delete() + else: + should_create = True + + if should_create: + notification = cm.create_notification(notification_type, + label=label, details=details) + changed = True + elif state == 'absent': + for n in existing: + n.delete() + changed = True + else: + module.fail_json(msg='state must be either "present" or "absent"') + + if notification: + notification_dict = { + "id": notification.id, + "type": notification.type, + "label": notification.label, + "details": notification.details + } + module.exit_json(changed=changed, notification=notification_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present'), + label=dict(), + notification_type=dict(), + details=dict(type='dict', default={}) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + notification_type = module.params.get('notification_type') + details = module.params.get('details') + + setup_rax_module(module, pyrax) + + notification(module, state, label, notification_type, details) + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. 
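Editor's aside, not part of the patch (the module's closing main() call follows on the next line): the details dict is the only part of this module whose shape changes with notification_type. Only the email key, address, appears in the EXAMPLES above; the webhook and pagerduty keys sketched below are assumptions based on Rackspace's notification-type documentation and should be verified before use.

    def notification_details(notification_type, target):
        """Build the 'details' payload for a given notification type."""
        if notification_type == 'email':
            return {'address': target}        # key shown in the EXAMPLES above
        if notification_type == 'webhook':
            return {'url': target}            # assumed key name
        if notification_type == 'pagerduty':
            return {'service_key': target}    # assumed key name
        raise ValueError('unsupported notification_type: %s' % notification_type)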
+main() diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py new file mode 100644 index 00000000000..c8d4d215292 --- /dev/null +++ b/cloud/rackspace/rax_mon_notification_plan.py @@ -0,0 +1,186 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments +DOCUMENTATION = ''' +--- +module: rax_mon_notification_plan +short_description: Create or delete a Rackspace Cloud Monitoring notification + plan. +description: +- Create or delete a Rackspace Cloud Monitoring notification plan by + associating existing rax_mon_notifications with severity levels. Rackspace + monitoring module flow | rax_mon_entity -> rax_mon_check -> + rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm +version_added: "1.8.2" +options: + state: + description: + - Ensure that the notification plan with this C(label) exists or does not + exist. + choices: ['present', 'absent'] + label: + description: + - Defines a friendly name for this notification plan. String between 1 and + 255 characters long. + required: true + critical_state: + description: + - Notification list to use when the alarm state is CRITICAL. Must be an + array of valid rax_mon_notification ids. + warning_state: + description: + - Notification list to use when the alarm state is WARNING. Must be an array + of valid rax_mon_notification ids. + ok_state: + description: + - Notification list to use when the alarm state is OK. Must be an array of + valid rax_mon_notification ids. +author: Ash Wilson +extends_documentation_fragment: rackspace.openstack +''' + +EXAMPLES = ''' +- name: Example notification plan + gather_facts: False + hosts: local + connection: local + tasks: + - name: Establish who gets called when. + rax_mon_notification_plan: + credentials: ~/.rax_pub + state: present + label: defcon1 + critical_state: + - "{{ everyone['notification']['id'] }}" + warning_state: + - "{{ opsfloor['notification']['id'] }}" + register: defcon1 +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +def notification_plan(module, state, label, critical_state, warning_state, ok_state): + + if not label: + module.fail_json(msg='label is required for rax_mon_notification_plan') + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + changed = False + notification_plan = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. 
This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for n in cm.list_notification_plans(): + if n.label == label: + existing.append(n) + + if existing: + notification_plan = existing[0] + + if state == 'present': + should_create = False + should_delete = False + + if len(existing) > 1: + module.fail_json(msg='%s notification plans are labelled %s.' % + (len(existing), label)) + + if notification_plan: + should_delete = (critical_state and critical_state != notification_plan.critical_state) or \ + (warning_state and warning_state != notification_plan.warning_state) or \ + (ok_state and ok_state != notification_plan.ok_state) + + if should_delete: + notification_plan.delete() + should_create = True + else: + should_create = True + + if should_create: + notification_plan = cm.create_notification_plan(label=label, + critical_state=critical_state, + warning_state=warning_state, + ok_state=ok_state) + changed = True + elif state == 'absent': + for np in existing: + np.delete() + changed = True + else: + module.fail_json(msg='state must be either "present" or "absent"') + + if notification_plan: + notification_plan_dict = { + "id": notification_plan.id, + "critical_state": notification_plan.critical_state, + "warning_state": notification_plan.warning_state, + "ok_state": notification_plan.ok_state, + "metadata": notification_plan.metadata + } + module.exit_json(changed=changed, notification_plan=notification_plan_dict) + else: + module.exit_json(changed=changed) + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present'), + label=dict(), + critical_state=dict(type='list'), + warning_state=dict(type='list'), + ok_state=dict(type='list') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + critical_state = module.params.get('critical_state') + warning_state = module.params.get('warning_state') + ok_state = module.params.get('ok_state') + + setup_rax_module(module, pyrax) + + notification_plan(module, state, label, critical_state, warning_state, ok_state) + +# Import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +# Invoke the module. 
+main() From 14b62bb32ae122ca7d5fcb2f05c149da33e904c7 Mon Sep 17 00:00:00 2001 From: Matthew Landauer Date: Tue, 17 Feb 2015 16:56:15 +1100 Subject: [PATCH 042/720] Fix display of error message It was crashing due to "domain" variable not being defined --- network/dnsmadeeasy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index 148e25a5011..c1f450b2e0f 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -292,7 +292,7 @@ def main(): if not "value" in new_record: if not current_record: module.fail_json( - msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, domain)) + msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain'])) module.exit_json(changed=False, result=current_record) # create record as it does not exist From 671571b1e1b4527ae3355abedbac6c34b3c51f7f Mon Sep 17 00:00:00 2001 From: Matthew Landauer Date: Tue, 17 Feb 2015 17:13:27 +1100 Subject: [PATCH 043/720] If record_value="" write empty value to dns made easy This is necessary for instance when setting CNAMEs that point to the root of the domain. This is different than leaving record_value out completely which has the same behaviour as before --- network/dnsmadeeasy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index c1f450b2e0f..86130f02103 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -275,7 +275,7 @@ def main(): current_record = DME.getRecordByName(record_name) new_record = {'name': record_name} for i in ["record_value", "record_type", "record_ttl"]: - if module.params[i]: + if not module.params[i] is None: new_record[i[len("record_"):]] = module.params[i] # Compare new record against existing one From fa2df8c7d5e63b5db4282c8a4e081c9711b95d5b Mon Sep 17 00:00:00 2001 From: Matthew Landauer Date: Wed, 18 Feb 2015 10:42:07 +1100 Subject: [PATCH 044/720] If record_name="" write empty value to dns made easy This is necessary for instance when setting MX records on the root of a domain. This is different than leaving record_name out completely which has the same behaviour as before --- network/dnsmadeeasy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index 86130f02103..c502bfc5ce8 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -264,7 +264,7 @@ def main(): record_name = module.params["record_name"] # Follow Keyword Controlled Behavior - if not record_name: + if record_name is None: domain_records = DME.getRecords() if not domain_records: module.fail_json( From 19b0c838192f49a4704e118fdd9457fd905df817 Mon Sep 17 00:00:00 2001 From: Matthew Landauer Date: Wed, 18 Feb 2015 12:14:58 +1100 Subject: [PATCH 045/720] Handle MX,NS,TXT records correctly and don't assume one record type per name --- network/dnsmadeeasy.py | 41 +++++++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index c502bfc5ce8..b6320d65e6c 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -134,6 +134,7 @@ class DME2: self.domain_map = None # ["domain_name"] => ID self.record_map = None # ["record_name"] => ID self.records = None # ["record_ID"] => + self.all_records = None # Lookup the domain ID if passed as a domain name vs. 
ID if not self.domain.isdigit(): @@ -191,11 +192,33 @@ class DME2: return self.records.get(record_id, False) - def getRecordByName(self, record_name): - if not self.record_map: - self._instMap('record') - - return self.getRecord(self.record_map.get(record_name, 0)) + # Try to find a single record matching this one. + # How we do this depends on the type of record. For instance, there + # can be several MX records for a single record_name while there can + # only be a single CNAME for a particular record_name. Note also that + # there can be several records with different types for a single name. + def getMatchingRecord(self, record_name, record_type, record_value): + # Get all the records if not already cached + if not self.all_records: + self.all_records = self.getRecords() + + # TODO SRV type not yet implemented + if record_type in ["A", "AAAA", "CNAME", "HTTPRED", "PTR"]: + for result in self.all_records: + if result['name'] == record_name and result['type'] == record_type: + return result + return False + elif record_type in ["MX", "NS", "TXT"]: + for result in self.all_records: + if record_type == "MX": + value = record_value.split(" ")[1] + else: + value = record_value + if result['name'] == record_name and result['type'] == record_type and result['value'] == value: + return result + return False + else: + raise Exception('record_type not yet supported') def getRecords(self): return self.query(self.record_url, 'GET')['data'] @@ -262,6 +285,8 @@ def main(): "account_secret"], module.params["domain"], module) state = module.params["state"] record_name = module.params["record_name"] + record_type = module.params["record_type"] + record_value = module.params["record_value"] # Follow Keyword Controlled Behavior if record_name is None: @@ -272,11 +297,15 @@ def main(): module.exit_json(changed=False, result=domain_records) # Fetch existing record + Build new one - current_record = DME.getRecordByName(record_name) + current_record = DME.getMatchingRecord(record_name, record_type, record_value) new_record = {'name': record_name} for i in ["record_value", "record_type", "record_ttl"]: if not module.params[i] is None: new_record[i[len("record_"):]] = module.params[i] + # Special handling for mx record + if new_record["type"] == "MX": + new_record["mxLevel"] = new_record["value"].split(" ")[0] + new_record["value"] = new_record["value"].split(" ")[1] # Compare new record against existing one changed = False From 4dfbafb339d9456d595efcdfd12219b6df0d8170 Mon Sep 17 00:00:00 2001 From: "chris.schmidt" Date: Wed, 18 Feb 2015 09:22:57 -0700 Subject: [PATCH 046/720] Removed version check from main as it is checked in the download function. Having the check here was breaking "latest version" functionality. --- packaging/maven_artifact.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packaging/maven_artifact.py b/packaging/maven_artifact.py index bf4ca59f92c..75837050892 100755 --- a/packaging/maven_artifact.py +++ b/packaging/maven_artifact.py @@ -349,8 +349,7 @@ def main(): prev_state = "absent" if os.path.isdir(dest): - dest = dest + "/" + artifact_id + "-" + version + ".jar" - + dest = dest + "/" + artifact_id + "-" + version + "." 
+ extension if os.path.lexists(dest): prev_state = "present" else: From 8ba219ed78d09c2a1ce8c9d4e519e1ebe799fc2d Mon Sep 17 00:00:00 2001 From: "chris.schmidt" Date: Wed, 18 Feb 2015 09:24:21 -0700 Subject: [PATCH 047/720] Added check for "latest" in version field --- packaging/maven_artifact.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/maven_artifact.py b/packaging/maven_artifact.py index 75837050892..699d97a54c2 100755 --- a/packaging/maven_artifact.py +++ b/packaging/maven_artifact.py @@ -250,7 +250,7 @@ class MavenDownloader: def download(self, artifact, filename=None): filename = artifact.get_filename(filename) - if not artifact.version: + if not artifact.version or artifact.version == "latest": artifact = Artifact(artifact.group_id, artifact.artifact_id, self._find_latest_version_available(artifact), artifact.classifier, artifact.extension) From 65a1129ef9800c2f094f07b84677d33b762337cb Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 23 Feb 2015 14:18:35 -0600 Subject: [PATCH 048/720] Correct version_added in the documentation. --- cloud/rackspace/rax_mon_alarm.py | 2 +- cloud/rackspace/rax_mon_check.py | 2 +- cloud/rackspace/rax_mon_entity.py | 2 +- cloud/rackspace/rax_mon_notification.py | 2 +- cloud/rackspace/rax_mon_notification_plan.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py index f5fc9593abd..aa742d02bd8 100644 --- a/cloud/rackspace/rax_mon_alarm.py +++ b/cloud/rackspace/rax_mon_alarm.py @@ -27,7 +27,7 @@ description: notifications. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> *rax_mon_alarm* -version_added: "1.8.2" +version_added: "1.9" options: state: description: diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py index 9da283c3ba0..3f86da93ab6 100644 --- a/cloud/rackspace/rax_mon_check.py +++ b/cloud/rackspace/rax_mon_check.py @@ -28,7 +28,7 @@ description: monitor. Rackspace monitoring module flow | rax_mon_entity -> *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm -version_added: "1.8.2" +version_added: "1.9" options: state: description: diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py index 8b95c291914..9d20252b0e5 100644 --- a/cloud/rackspace/rax_mon_entity.py +++ b/cloud/rackspace/rax_mon_entity.py @@ -26,7 +26,7 @@ description: provide a convenient, centralized place to store IP addresses. Rackspace monitoring module flow | *rax_mon_entity* -> rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm -version_added: "1.8.2" +version_added: "1.9" options: label: description: diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py index 74c4319255b..475eb345f51 100644 --- a/cloud/rackspace/rax_mon_notification.py +++ b/cloud/rackspace/rax_mon_notification.py @@ -25,7 +25,7 @@ description: channel that can be used to communicate alarms, such as email, webhooks, or PagerDuty. 
Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm -version_added: "1.8.2" +version_added: "1.9" options: state: description: diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py index c8d4d215292..b81b00f7d18 100644 --- a/cloud/rackspace/rax_mon_notification_plan.py +++ b/cloud/rackspace/rax_mon_notification_plan.py @@ -26,7 +26,7 @@ description: associating existing rax_mon_notifications with severity levels. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm -version_added: "1.8.2" +version_added: "1.9" options: state: description: From 205e4e5530699809713fdcada0ee477abb68fb50 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 23 Feb 2015 14:25:51 -0600 Subject: [PATCH 049/720] Use required=True and choices=[]. --- cloud/rackspace/rax_mon_alarm.py | 10 +++++----- cloud/rackspace/rax_mon_check.py | 8 ++++---- cloud/rackspace/rax_mon_entity.py | 4 ++-- cloud/rackspace/rax_mon_notification.py | 8 ++++---- cloud/rackspace/rax_mon_notification_plan.py | 4 ++-- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py index aa742d02bd8..f4d2a9398a5 100644 --- a/cloud/rackspace/rax_mon_alarm.py +++ b/cloud/rackspace/rax_mon_alarm.py @@ -198,11 +198,11 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - state=dict(default='present'), - label=dict(), - entity_id=dict(), - check_id=dict(), - notification_plan_id=dict(), + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + entity_id=dict(required=True), + check_id=dict(required=True), + notification_plan_id=dict(required=True), criteria=dict(), disabled=dict(type='bool', default=False), metadata=dict(type='dict') diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py index 3f86da93ab6..27798e6cd5a 100644 --- a/cloud/rackspace/rax_mon_check.py +++ b/cloud/rackspace/rax_mon_check.py @@ -271,9 +271,9 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - entity_id=dict(), - label=dict(), - check_type=dict(), + entity_id=dict(required=True), + label=dict(required=True), + check_type=dict(required=True), monitoring_zones_poll=dict(), target_hostname=dict(), target_alias=dict(), @@ -282,7 +282,7 @@ def main(): metadata=dict(type='dict', default={}), period=dict(type='int'), timeout=dict(type='int'), - state=dict(default='present') + state=dict(default='present', choices=['present', 'absent']) ) ) diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py index 9d20252b0e5..b1bd13c61ad 100644 --- a/cloud/rackspace/rax_mon_entity.py +++ b/cloud/rackspace/rax_mon_entity.py @@ -161,8 +161,8 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - state=dict(default='present'), - label=dict(), + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), agent_id=dict(), named_ip_addresses=dict(type='dict', default={}), metadata=dict(type='dict', default={}) diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py index 475eb345f51..6962b14b3e6 100644 --- a/cloud/rackspace/rax_mon_notification.py +++ b/cloud/rackspace/rax_mon_notification.py @@ -154,10 +154,10 @@ def main(): argument_spec = rax_argument_spec() 
argument_spec.update( dict( - state=dict(default='present'), - label=dict(), - notification_type=dict(), - details=dict(type='dict', default={}) + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']), + details=dict(required=True, type='dict') ) ) diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py index b81b00f7d18..1bb5052c8f2 100644 --- a/cloud/rackspace/rax_mon_notification_plan.py +++ b/cloud/rackspace/rax_mon_notification_plan.py @@ -151,8 +151,8 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - state=dict(default='present'), - label=dict(), + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), critical_state=dict(type='list'), warning_state=dict(type='list'), ok_state=dict(type='list') From c0549335135e33f1cbd49575e8e7428647a06e28 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 23 Feb 2015 14:33:02 -0600 Subject: [PATCH 050/720] Eliminate redundant module argument checks. --- cloud/rackspace/rax_mon_alarm.py | 15 +-------------- cloud/rackspace/rax_mon_check.py | 10 ---------- cloud/rackspace/rax_mon_entity.py | 6 +----- cloud/rackspace/rax_mon_notification.py | 13 +------------ cloud/rackspace/rax_mon_notification_plan.py | 7 +------ 5 files changed, 4 insertions(+), 47 deletions(-) diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py index f4d2a9398a5..f9b97bc8dd1 100644 --- a/cloud/rackspace/rax_mon_alarm.py +++ b/cloud/rackspace/rax_mon_alarm.py @@ -105,17 +105,6 @@ except ImportError: def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria, disabled, metadata): - # Verify the presence of required attributes. - - required_attrs = { - "label": label, "entity_id": entity_id, "check_id": check_id, - "notification_plan_id": notification_plan_id - } - - for (key, value) in required_attrs.iteritems(): - if not value: - module.fail_json(msg=('%s is required for rax_mon_alarm' % key)) - if len(label) < 1 or len(label) > 255: module.fail_json(msg='label must be between 1 and 255 characters long') @@ -173,12 +162,10 @@ def alarm(module, state, label, entity_id, check_id, notification_plan_id, crite criteria=criteria, disabled=disabled, label=label, metadata=metadata) changed = True - elif state == 'absent': + else: for a in existing: a.delete() changed = True - else: - module.fail_json(msg='state must be either present or absent.') if alarm: alarm_dict = { diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py index 27798e6cd5a..101efd3c858 100644 --- a/cloud/rackspace/rax_mon_check.py +++ b/cloud/rackspace/rax_mon_check.py @@ -141,16 +141,6 @@ def cloud_check(module, state, entity_id, label, check_type, monitoring_zones_poll, target_hostname, target_alias, details, disabled, metadata, period, timeout): - # Verify the presence of required attributes. - - required_attrs = { - "entity_id": entity_id, "label": label, "check_type": check_type - } - - for (key, value) in required_attrs.iteritems(): - if not value: - module.fail_json(msg=('%s is required for rax_mon_check' % key)) - # Coerce attributes. 
if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list): diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py index b1bd13c61ad..5f82ff9c524 100644 --- a/cloud/rackspace/rax_mon_entity.py +++ b/cloud/rackspace/rax_mon_entity.py @@ -83,8 +83,6 @@ except ImportError: def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata): - if not label: - module.fail_json(msg='label is required for rax_mon_entity') if len(label) < 1 or len(label) > 255: module.fail_json(msg='label must be between 1 and 255 characters long') @@ -139,13 +137,11 @@ def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, ip_addresses=named_ip_addresses, metadata=metadata) changed = True - elif state == 'absent': + else: # Delete the existing Entities. for e in existing: e.delete() changed = True - else: - module.fail_json(msg='state must be present or absent') if entity: entity_dict = { diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py index 6962b14b3e6..8a21b088c5e 100644 --- a/cloud/rackspace/rax_mon_notification.py +++ b/cloud/rackspace/rax_mon_notification.py @@ -76,18 +76,9 @@ except ImportError: def notification(module, state, label, notification_type, details): - if not label: - module.fail_json(msg='label is required for rax_mon_notification') - if len(label) < 1 or len(label) > 255: module.fail_json(msg='label must be between 1 and 255 characters long') - if not notification_type: - module.fail_json(msg='you must provide a notification_type') - - if not details: - module.fail_json(msg='notification details are required') - changed = False notification = None @@ -132,12 +123,10 @@ def notification(module, state, label, notification_type, details): notification = cm.create_notification(notification_type, label=label, details=details) changed = True - elif state == 'absent': + else: for n in existing: n.delete() changed = True - else: - module.fail_json(msg='state must be either "present" or "absent"') if notification: notification_dict = { diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py index 1bb5052c8f2..05b89b2cfb3 100644 --- a/cloud/rackspace/rax_mon_notification_plan.py +++ b/cloud/rackspace/rax_mon_notification_plan.py @@ -80,9 +80,6 @@ except ImportError: def notification_plan(module, state, label, critical_state, warning_state, ok_state): - if not label: - module.fail_json(msg='label is required for rax_mon_notification_plan') - if len(label) < 1 or len(label) > 255: module.fail_json(msg='label must be between 1 and 255 characters long') @@ -128,12 +125,10 @@ def notification_plan(module, state, label, critical_state, warning_state, ok_st warning_state=warning_state, ok_state=ok_state) changed = True - elif state == 'absent': + else: for np in existing: np.delete() changed = True - else: - module.fail_json(msg='state must be either "present" or "absent"') if notification_plan: notification_plan_dict = { From e9c12bd8c5ae7722087ec682292128a85baa8b65 Mon Sep 17 00:00:00 2001 From: Matt Hite Date: Tue, 24 Feb 2015 16:55:22 -0800 Subject: [PATCH 051/720] Added session and monitor state enabling/disabling --- network/f5/bigip_node.py | 103 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 102 insertions(+), 1 deletion(-) diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index 68b6a2b52f1..c03245d42fa 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -61,6 +61,22 @@ options: default: 
present choices: ['present', 'absent'] aliases: [] + session_state: + description: + - Set new session availability status for node + version_added: "1.9" + required: false + default: null + choices: ['enabled', 'disabled'] + aliases: [] + monitor_state: + description: + - Set monitor availability status for node + version_added: "1.9" + required: false + default: null + choices: ['enabled', 'disabled'] + aliases: [] partition: description: - Partition @@ -137,6 +153,31 @@ EXAMPLES = ''' partition=matthite name="{{ ansible_default_ipv4["address"] }}" +# The BIG-IP GUI doesn't map directly to the API calls for "Node -> +# General Properties -> State". The following states map to API monitor +# and session states. +# +# Enabled (all traffic allowed): +# monitor_state=enabled, session_state=enabled +# Disabled (only persistent or active connections allowed): +# monitor_state=enabled, session_state=disabled +# Forced offline (only active connections allowed): +# monitor_state=disabled, session_state=disabled +# +# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down + + - name: Force node offline + local_action: > + bigip_node + server=lb.mydomain.com + user=admin + password=mysecret + state=present + session_state=disabled + monitor_state=disabled + partition=matthite + name="{{ ansible_default_ipv4["address"] }}" + ''' try: @@ -201,11 +242,32 @@ def delete_node_address(api, address): def set_node_description(api, name, description): api.LocalLB.NodeAddressV2.set_description(nodes=[name], - descriptions=[description]) + descriptions=[description]) def get_node_description(api, name): return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0] +def set_node_session_enabled_state(api, name, session_state): + session_state = "STATE_%s" % session_state.strip().upper() + api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name], + states=[session_state]) + +def get_node_session_status(api, name): + result = api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0] + result = result.split("SESSION_STATUS_")[-1].lower() + return result + +def set_node_monitor_state(api, name, monitor_state): + monitor_state = "STATE_%s" % monitor_state.strip().upper() + api.LocalLB.NodeAddressV2.set_monitor_state(nodes=[name], + states=[monitor_state]) + +def get_node_monitor_status(api, name): + result = api.LocalLB.NodeAddressV2.get_monitor_status(nodes=[name])[0] + result = result.split("MONITOR_STATUS_")[-1].lower() + return result + + def main(): module = AnsibleModule( argument_spec = dict( @@ -213,6 +275,8 @@ def main(): user = dict(type='str', required=True), password = dict(type='str', required=True), state = dict(type='str', default='present', choices=['present', 'absent']), + session_state = dict(type='str', choices=['enabled', 'disabled']), + monitor_state = dict(type='str', choices=['enabled', 'disabled']), partition = dict(type='str', default='Common'), name = dict(type='str', required=True), host = dict(type='str', aliases=['address', 'ip']), @@ -228,6 +292,8 @@ def main(): user = module.params['user'] password = module.params['password'] state = module.params['state'] + session_state = module.params['session_state'] + monitor_state = module.params['monitor_state'] partition = module.params['partition'] host = module.params['host'] name = module.params['name'] @@ -264,6 +330,13 @@ def main(): module.fail_json(msg="unable to create: %s" % desc) else: result = {'changed': True} + if session_state is not None: + set_node_session_enabled_state(api, address, + 
session_state) + result = {'changed': True} + if monitor_state is not None: + set_node_monitor_state(api, address, monitor_state) + result = {'changed': True} if description is not None: set_node_description(api, address, description) result = {'changed': True} @@ -277,6 +350,34 @@ def main(): module.fail_json(msg="Changing the node address is " \ "not supported by the API; " \ "delete and recreate the node.") + if session_state is not None: + session_status = get_node_session_status(api, address) + if session_state == 'enabled' and \ + session_status == 'forced_disabled': + if not module.check_mode: + set_node_session_enabled_state(api, address, + session_state) + result = {'changed': True} + elif session_state == 'disabled' and \ + session_status != 'force_disabled': + if not module.check_mode: + set_node_session_enabled_state(api, address, + session_state) + result = {'changed': True} + if monitor_state is not None: + monitor_status = get_node_monitor_status(api, address) + if monitor_state == 'enabled' and \ + monitor_status == 'forced_down': + if not module.check_mode: + set_node_monitor_state(api, address, + monitor_state) + result = {'changed': True} + elif monitor_state == 'disabled' and \ + monitor_status != 'forced_down': + if not module.check_mode: + set_node_monitor_state(api, address, + monitor_state) + result = {'changed': True} if description is not None: if get_node_description(api, address) != description: if not module.check_mode: From 0cad9ee0bf407b9857515398fb667b9a6989cd2b Mon Sep 17 00:00:00 2001 From: Matt Hite Date: Wed, 25 Feb 2015 15:48:19 -0800 Subject: [PATCH 052/720] Support for monitor and session state manipulation added to bigip_pool_member module --- network/f5/bigip_pool_member.py | 94 +++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index 5aef9f0ae98..fd2da21b80f 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -63,6 +63,22 @@ options: default: present choices: ['present', 'absent'] aliases: [] + session_state: + description: + - Set new session availability status for pool member + version_added: "1.9" + required: false + default: null + choices: ['enabled', 'disabled'] + aliases: [] + monitor_state: + description: + - Set monitor availability status for pool member + version_added: "1.9" + required: false + default: null + choices: ['enabled', 'disabled'] + aliases: [] pool: description: - Pool name. This pool must exist. @@ -172,6 +188,34 @@ EXAMPLES = ''' host="{{ ansible_default_ipv4["address"] }}" port=80 + + # The BIG-IP GUI doesn't map directly to the API calls for "Pool -> + # Members -> State". The following states map to API monitor + # and session states. 
+ # + # Enabled (all traffic allowed): + # monitor_state=enabled, session_state=enabled + # Disabled (only persistent or active connections allowed): + # monitor_state=enabled, session_state=disabled + # Forced offline (only active connections allowed): + # monitor_state=disabled, session_state=disabled + # + # See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down + + - name: Force pool member offline + local_action: > + bigip_pool_member + server=lb.mydomain.com + user=admin + password=mysecret + state=present + session_state=disabled + monitor_state=disabled + pool=matthite-pool + partition=matthite + host="{{ ansible_default_ipv4["address"] }}" + port=80 + ''' try: @@ -276,6 +320,28 @@ def set_ratio(api, pool, address, port, ratio): members = [{'address': address, 'port': port}] api.LocalLB.Pool.set_member_ratio(pool_names=[pool], members=[members], ratios=[[ratio]]) +def set_member_session_enabled_state(api, pool, address, port, session_state): + members = [{'address': address, 'port': port}] + session_state = ["STATE_%s" % session_state.strip().upper()] + api.LocalLB.Pool.set_member_session_enabled_state(pool_names=[pool], members=[members], session_states=[session_state]) + +def get_member_session_status(api, pool, address, port): + members = [{'address': address, 'port': port}] + result = api.LocalLB.Pool.get_member_session_status(pool_names=[pool], members=[members])[0][0] + result = result.split("SESSION_STATUS_")[-1].lower() + return result + +def set_member_monitor_state(api, pool, address, port, monitor_state): + members = [{'address': address, 'port': port}] + monitor_state = ["STATE_%s" % monitor_state.strip().upper()] + api.LocalLB.Pool.set_member_monitor_state(pool_names=[pool], members=[members], monitor_states=[monitor_state]) + +def get_member_monitor_status(api, pool, address, port): + members = [{'address': address, 'port': port}] + result = api.LocalLB.Pool.get_member_monitor_status(pool_names=[pool], members=[members])[0][0] + result = result.split("MONITOR_STATUS_")[-1].lower() + return result + def main(): module = AnsibleModule( argument_spec = dict( @@ -283,6 +349,8 @@ def main(): user = dict(type='str', required=True), password = dict(type='str', required=True), state = dict(type='str', default='present', choices=['present', 'absent']), + session_state = dict(type='str', choices=['enabled', 'disabled']), + monitor_state = dict(type='str', choices=['enabled', 'disabled']), pool = dict(type='str', required=True), partition = dict(type='str', default='Common'), host = dict(type='str', required=True, aliases=['address', 'name']), @@ -302,6 +370,8 @@ def main(): user = module.params['user'] password = module.params['password'] state = module.params['state'] + session_state = module.params['session_state'] + monitor_state = module.params['monitor_state'] partition = module.params['partition'] pool = "/%s/%s" % (partition, module.params['pool']) connection_limit = module.params['connection_limit'] @@ -347,6 +417,10 @@ def main(): set_rate_limit(api, pool, address, port, rate_limit) if ratio is not None: set_ratio(api, pool, address, port, ratio) + if session_state is not None: + set_member_session_enabled_state(api, pool, address, port, session_state) + if monitor_state is not None: + set_member_monitor_state(api, pool, address, port, monitor_state) result = {'changed': True} else: # pool member exists -- potentially modify attributes @@ -366,6 +440,26 @@ def main(): if not module.check_mode: set_ratio(api, pool, address, port, ratio) result 
= {'changed': True} + if session_state is not None: + session_status = get_member_session_status(api, pool, address, port) + if session_state == 'enabled' and session_status == 'forced_disabled': + if not module.check_mode: + set_member_session_enabled_state(api, pool, address, port, session_state) + result = {'changed': True} + elif session_state == 'disabled' and session_status != 'force_disabled': + if not module.check_mode: + set_member_session_enabled_state(api, pool, address, port, session_state) + result = {'changed': True} + if monitor_state is not None: + monitor_status = get_member_monitor_status(api, pool, address, port) + if monitor_state == 'enabled' and monitor_status == 'forced_down': + if not module.check_mode: + set_member_monitor_state(api, pool, address, port, monitor_state) + result = {'changed': True} + elif monitor_state == 'disabled' and monitor_status != 'forced_down': + if not module.check_mode: + set_member_monitor_state(api, pool, address, port, monitor_state) + result = {'changed': True} except Exception, e: module.fail_json(msg="received exception: %s" % e) From 0d7647d904580351a87824287fc88c25bf817a10 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Sat, 28 Feb 2015 15:23:23 +0000 Subject: [PATCH 053/720] remove debug imports from acl module --- clustering/consul_acl | 3 --- 1 file changed, 3 deletions(-) diff --git a/clustering/consul_acl b/clustering/consul_acl index ae3efe5787f..fc997400ae9 100644 --- a/clustering/consul_acl +++ b/clustering/consul_acl @@ -92,8 +92,6 @@ except ImportError: " see https://pypi.python.org/pypi/pyhcl'" sys.exit(1) -import epdb - def execute(module): @@ -216,7 +214,6 @@ class Rules: return len(self.rules) > 0 def to_json(self): - # import epdb; epdb.serve() rules = {} for key, rule in self.rules.iteritems(): rules[key] = {'policy': rule.policy} From a8584ade957004ad43a5c18e172f5a843569a739 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Tue, 3 Mar 2015 14:18:56 +0000 Subject: [PATCH 054/720] fix logic that tests for change in an existing registered service --- clustering/consul | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clustering/consul b/clustering/consul index fa1e06c3678..8aa2ce1fe4c 100644 --- a/clustering/consul +++ b/clustering/consul @@ -224,7 +224,7 @@ def add_service(module, service): # there is no way to retreive the details of checks so if a check is present # in the service it must be reregistered - if service.has_checks() or not(existing or existing == service): + if service.has_checks() or not existing or not existing == service: service.register(consul_api) # check that it registered correctly From 0c6d426c40932ec55a70ee96ec24f5131f46e2af Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Tue, 3 Mar 2015 20:03:46 +0000 Subject: [PATCH 055/720] require a valid duration suffix for interval and ttl values --- clustering/consul | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/clustering/consul b/clustering/consul index 8aa2ce1fe4c..24df908c45c 100644 --- a/clustering/consul +++ b/clustering/consul @@ -375,21 +375,21 @@ class ConsulCheck(): if check_id: self.check_id = check_id self.script = script - self.interval = str(interval) - - if not self.interval.endswith('m') or self.interval.endswith('s'): - self.interval += 'm' - - self.ttl = ttl + self.interval = self.validate_duration('interval', interval) + self.ttl = self.validate_duration('ttl', ttl) self.notes = notes self.node = node self.host = host - if interval and interval <= 0: - raise 
Error('check interval must be positive') + - if ttl and ttl <= 0: - raise Error('check ttl value must be positive') + def validate_duration(self, name, duration): + if duration: + duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] + if not any((duration.endswith(suffix) for suffix in duration_units)): + raise Exception('Invalid %s %s you must specify units (%s)' % + (name, duration, ', '.join(duration_units))) + return duration def register(self, consul_api): consul_api.agent.check.register(self.name, check_id=self.check_id, @@ -434,7 +434,8 @@ def main(): check_id=dict(required=False), check_name=dict(required=False), host=dict(default='localhost'), - interval=dict(required=False, default='1m'), + interval=dict(required=False, type='str'), + ttl=dict(required=False, type='str'), check_node=dict(required=False), check_host=dict(required=False), notes=dict(required=False), From 34e7d9c938ef8ea0d9f754fe9d4328f50c1c1dae Mon Sep 17 00:00:00 2001 From: David Wittman Date: Fri, 13 Mar 2015 21:09:33 -0500 Subject: [PATCH 056/720] Fix multiple issues with alternatives module - Changes are no longer erroneously reported on RHEL (#12) - Adding new link groups on Debian works again. - This was broken in a previous commit by assuming the OS was RHEL if `update-alternatives --query ` had a return code of 2 - Prefer `--display` over `--query` for determining available alternatives - --display is more distro-agnostic and simplifies the code - Fix missing `msg=` in `fail_json` call when `link` is missing - Document that `link` is required on RHEL-based distros Tested on Ubuntu 12.04+ and CentOS 6/7 --- system/alternatives.py | 105 +++++++++++++++++------------------------ 1 file changed, 43 insertions(+), 62 deletions(-) diff --git a/system/alternatives.py b/system/alternatives.py index 871a494e87d..ff4de59cf11 100755 --- a/system/alternatives.py +++ b/system/alternatives.py @@ -4,6 +4,7 @@ """ Ansible module to manage symbolic link alternatives. (c) 2014, Gabe Mulley +(c) 2015, David Wittman This file is part of Ansible @@ -26,7 +27,7 @@ DOCUMENTATION = ''' module: alternatives short_description: Manages alternative programs for common commands description: - - Manages symbolic links using the 'update-alternatives' tool provided on debian-like systems. + - Manages symbolic links using the 'update-alternatives' tool - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). version_added: "1.6" options: @@ -41,6 +42,7 @@ options: link: description: - The path to the symbolic link that should point to the real executable. 
+ - This option is required on RHEL-based distributions required: false requirements: [ update-alternatives ] ''' @@ -55,12 +57,14 @@ EXAMPLES = ''' DEFAULT_LINK_PRIORITY = 50 +import re + def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - path = dict(required=True), + path = dict(required=True), link = dict(required=False), ), supports_check_mode=True, @@ -71,78 +75,55 @@ def main(): path = params['path'] link = params['link'] - UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True) + UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True) current_path = None all_alternatives = [] - os_family = None - (rc, query_output, query_error) = module.run_command( - [UPDATE_ALTERNATIVES, '--query', name] + # Run `update-alternatives --display ` to find existing alternatives + (rc, display_output, _) = module.run_command( + [UPDATE_ALTERNATIVES, '--display', name] ) - # Gather the current setting and all alternatives from the query output. - # Query output should look something like this on Debian systems: - - # Name: java - # Link: /usr/bin/java - # Slaves: - # java.1.gz /usr/share/man/man1/java.1.gz - # Status: manual - # Best: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - # Value: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java - - # Alternative: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java - # Priority: 1061 - # Slaves: - # java.1.gz /usr/lib/jvm/java-6-openjdk-amd64/jre/man/man1/java.1.gz - - # Alternative: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - # Priority: 1071 - # Slaves: - # java.1.gz /usr/lib/jvm/java-7-openjdk-amd64/jre/man/man1/java.1.gz - if rc == 0: - os_family = "Debian" - for line in query_output.splitlines(): - split_line = line.split(':') - if len(split_line) == 2: - key = split_line[0] - value = split_line[1].strip() - if key == 'Value': - current_path = value - elif key == 'Alternative': - all_alternatives.append(value) - elif key == 'Link' and not link: - link = value - elif rc == 2: - os_family = "RedHat" - # This is the version of update-alternatives that is shipped with - # chkconfig on RedHat-based systems. Try again with the right options. - (rc, query_output, query_error) = module.run_command( - [UPDATE_ALTERNATIVES, '--list'] - ) - for line in query_output.splitlines(): - line_name, line_mode, line_path = line.strip().split("\t") - if line_name != name: - continue - current_path = line_path - break + # Alternatives already exist for this link group + # Parse the output to determine the current path of the symlink and + # available alternatives + current_path_regex = re.compile(r'^\s*link currently points to (.*)$', + re.MULTILINE) + alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE) + + current_path = current_path_regex.search(display_output).group(1) + all_alternatives = alternative_regex.findall(display_output) + + if not link: + # Read the current symlink target from `update-alternatives --query` + # in case we need to install the new alternative before setting it. 
+ # + # This is only compatible on Debian-based systems, as the other + # alternatives don't have --query available + rc, query_output, _ = module.run_command( + [UPDATE_ALTERNATIVES, '--query', name] + ) + if rc == 0: + for line in query_output.splitlines(): + if line.startswith('Link:'): + link = line.split()[1] + break if current_path != path: if module.check_mode: module.exit_json(changed=True, current_path=current_path) try: # install the requested path if necessary - # (unsupported on the RedHat version) - if path not in all_alternatives and os_family == "Debian": - if link: - module.run_command( - [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)], - check_rc=True - ) - else: - module.fail_json("Needed to install the alternative, but unable to do so, as we are missking the link") + if path not in all_alternatives: + if not link: + module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link") + + module.run_command( + [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)], + check_rc=True + ) # select the requested path module.run_command( @@ -151,7 +132,7 @@ def main(): ) module.exit_json(changed=True) - except subprocess.CalledProcessError, cpe: + except subprocess.CalledProcessError as cpe: module.fail_json(msg=str(dir(cpe))) else: module.exit_json(changed=False) From 8084671e33296043c47a362018f32a83d471e691 Mon Sep 17 00:00:00 2001 From: Kevin Klinemeier Date: Sun, 15 Mar 2015 21:42:35 -0700 Subject: [PATCH 057/720] Updated tags example to an actual datadog tag --- monitoring/datadog_event.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index 5d38dd4c31d..b481345fab9 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -71,7 +71,7 @@ datadog_event: title="Testing from ansible" text="Test!" priority="low" # Post an event with several tags datadog_event: title="Testing from ansible" text="Test!" 
api_key="6873258723457823548234234234" - tags=aa,bb,cc + tags=aa,bb,#host:{{ inventory_hostname }} ''' import socket From b553f59a54dbf7a0cae58a6d42054cc74d593c55 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Mon, 16 Mar 2015 16:50:53 +0000 Subject: [PATCH 058/720] Properly report exception causes particularly connection exceptions contacting the consul agent --- clustering/consul | 11 ++++++----- clustering/consul_acl | 11 ++++++----- clustering/consul_kv | 12 +++++++----- clustering/consul_session | 11 ++++++----- 4 files changed, 25 insertions(+), 20 deletions(-) diff --git a/clustering/consul b/clustering/consul index 24df908c45c..15a68f068a2 100644 --- a/clustering/consul +++ b/clustering/consul @@ -143,6 +143,7 @@ except ImportError, e: "see http://python-consul.readthedocs.org/en/latest/#installation'" sys.exit(1) +from requests.exceptions import ConnectionError def register_with_consul(module): @@ -453,11 +454,11 @@ def main(): ) try: register_with_consul(module) - except IOError, e: - error = e.read() - if not error: - error = str(e) - module.fail_json(msg=error) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) # import module snippets from ansible.module_utils.basic import * diff --git a/clustering/consul_acl b/clustering/consul_acl index fc997400ae9..cd5466c53b1 100644 --- a/clustering/consul_acl +++ b/clustering/consul_acl @@ -92,6 +92,7 @@ except ImportError: " see https://pypi.python.org/pypi/pyhcl'" sys.exit(1) +from requests.exceptions import ConnectionError def execute(module): @@ -284,11 +285,11 @@ def main(): try: execute(module) - except IOError, e: - error = e.read() - if not error: - error = str(e) - module.fail_json(msg=error) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) # import module snippets from ansible.module_utils.basic import * diff --git a/clustering/consul_kv b/clustering/consul_kv index 6a2b77ea7c6..8999a43319f 100644 --- a/clustering/consul_kv +++ b/clustering/consul_kv @@ -117,6 +117,7 @@ except ImportError, e: see http://python-consul.readthedocs.org/en/latest/#installation'""" sys.exit(1) +from requests.exceptions import ConnectionError def execute(module): @@ -227,11 +228,12 @@ def main(): try: execute(module) - except IOError, e: - error = e.read() - if not error: - error = str(e) - module.fail_json(msg=error) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) + # import module snippets from ansible.module_utils.basic import * diff --git a/clustering/consul_session b/clustering/consul_session index f11c5447e57..00f4cae7344 100644 --- a/clustering/consul_session +++ b/clustering/consul_session @@ -80,6 +80,7 @@ except ImportError, e: "http://python-consul.readthedocs.org/en/latest/#installation'" sys.exit(1) +from requests.errors import ConnectionError def execute(module): @@ -202,11 +203,11 @@ def main(): try: execute(module) - except IOError, e: - error = e.read() - if not error: - error = str(e) - module.fail_json(msg=error) + except ConnectionError, e: + module.fail_json(msg='Could not connect to 
consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) # import module snippets from ansible.module_utils.basic import * From aef5792772d267d243ba17f7451735fb4dc1f291 Mon Sep 17 00:00:00 2001 From: Todd Zullinger Date: Wed, 18 Mar 2015 15:07:56 -0400 Subject: [PATCH 059/720] monitoring/nagios: Allow comment to be specified The default remains 'Scheduling downtime' but can be overridden. --- monitoring/nagios.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index c564e712b04..497d0bc19f7 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -51,6 +51,11 @@ options: Only usable with the C(downtime) action. required: false default: Ansible + comment: + description: + - Comment for C(downtime) action. + required: false + default: Scheduling downtime minutes: description: - Minutes to schedule downtime for. @@ -84,6 +89,10 @@ EXAMPLES = ''' # schedule an hour of HOST downtime - nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} +# schedule an hour of HOST downtime, with a comment describing the reason +- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} + comment='This host needs disciplined' + # schedule downtime for ALL services on HOST - nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }} @@ -175,6 +184,7 @@ def main(): argument_spec=dict( action=dict(required=True, default=None, choices=ACTION_CHOICES), author=dict(default='Ansible'), + comment=dict(default='Scheduling downtime'), host=dict(required=False, default=None), minutes=dict(default=30), cmdfile=dict(default=which_cmdfile()), @@ -258,6 +268,7 @@ class Nagios(object): self.module = module self.action = kwargs['action'] self.author = kwargs['author'] + self.comment = kwargs['comment'] self.host = kwargs['host'] self.minutes = int(kwargs['minutes']) self.cmdfile = kwargs['cmdfile'] @@ -293,7 +304,7 @@ class Nagios(object): cmdfile=self.cmdfile) def _fmt_dt_str(self, cmd, host, duration, author=None, - comment="Scheduling downtime", start=None, + comment=None, start=None, svc=None, fixed=1, trigger=0): """ Format an external-command downtime string. @@ -326,6 +337,9 @@ class Nagios(object): if not author: author = self.author + if not comment: + comment = self.comment + if svc is not None: dt_args = [svc, str(start), str(end), str(fixed), str(trigger), str(duration_s), author, comment] From cb848fcd9ec8364210fc05a5a7addd955b8a2529 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 18 Mar 2015 20:23:05 -0700 Subject: [PATCH 060/720] Make our regex match the homebrew tap upstream regex. 
Fixes #312 Fixes #297 --- packaging/os/homebrew_tap.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/homebrew_tap.py b/packaging/os/homebrew_tap.py index a79ba076a8a..d329227b980 100644 --- a/packaging/os/homebrew_tap.py +++ b/packaging/os/homebrew_tap.py @@ -52,7 +52,7 @@ homebrew_tap: tap=homebrew/dupes,homebrew/science state=present def a_valid_tap(tap): '''Returns True if the tap is valid.''' - regex = re.compile(r'^(\S+)/(homebrew-)?(\w+)$') + regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$') return regex.match(tap) From 24cfcd2497f1d68cfffaef6f5afa355018f663c2 Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Wed, 18 Mar 2015 23:33:33 -0500 Subject: [PATCH 061/720] Updated lxc_container module to fix option parsing The option parsing object within the module was performing a split on an '=' sign and assuming that there would only ever be one '=' in a user provided option. Sadly, the assumption is incorrect and the list comprehension that is building the options list needs to be set to split on the first occurrence of an '=' sign in a given option string. This commit adds the required change to make it possible for options to contain additional '=' signs and be handled correctly. --- cloud/lxc/lxc_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 1ae67bf23c6..c5b290827bf 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -616,7 +616,7 @@ class LxcContainerManagement(object): # TODO(cloudnull) adjust import when issue has been resolved. import ast options_dict = ast.literal_eval(_container_config) - parsed_options = [i.split('=') for i in options_dict] + parsed_options = [i.split('=', 1) for i in options_dict] config_change = False for key, value in parsed_options: From c622c54e9e2d4071a3c8c936d8f86df8b10b7c3f Mon Sep 17 00:00:00 2001 From: HPLogsdon Date: Thu, 19 Mar 2015 17:11:42 -0600 Subject: [PATCH 062/720] Fix typo in hipchat notification failure message. Wish it could be more substantial, but alas, it's just a typo in a string. --- notification/hipchat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/hipchat.py b/notification/hipchat.py index 4ff95b32bf6..24fde9ecb35 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -137,7 +137,7 @@ def main(): try: send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception, e: - module.fail_json(msg="unable to sent msg: %s" % e) + module.fail_json(msg="unable to send msg: %s" % e) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) From b01c2cee66d720cf2f503052d5275bdff06a2f32 Mon Sep 17 00:00:00 2001 From: Ben Copeland Date: Fri, 20 Mar 2015 11:11:43 +0000 Subject: [PATCH 063/720] Added example block for the "SMTP username and password" --- notification/mail.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/notification/mail.py b/notification/mail.py index aa3345b4f98..92565016ad8 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -116,6 +116,16 @@ EXAMPLES = ''' # Example playbook sending mail to root - local_action: mail msg='System {{ ansible_hostname }} has been successfully provisioned.' 
+# Sending an e-mail using Gmail SMTP servers +- local_action: mail + host='smtp.gmail.com' + port=587 + username=username@gmail.com + password='mysecret' + to="John Smith " + subject='Ansible-report' + msg='System {{ ansible_hostname }} has been successfully provisioned.' + # Send e-mail to a bunch of users, attaching files - local_action: mail host='127.0.0.1' From b68c136010e4a7113f65a7a7bf5c52dc29c35b91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Fri, 20 Mar 2015 17:17:16 +0100 Subject: [PATCH 064/720] [patch] Make sure patch command is found on remote system. --- files/patch.py | 2 ++ 1 file changed, 2 insertions(+) mode change 100644 => 100755 files/patch.py diff --git a/files/patch.py b/files/patch.py old mode 100644 new mode 100755 index cd4b3130079..314a1bc37db --- a/files/patch.py +++ b/files/patch.py @@ -141,6 +141,8 @@ def main(): p.basedir = path.dirname(p.dest) patch_bin = module.get_bin_path('patch') + if patch_bin is None: + module.fail_json(msg="patch command not found") patch_func = lambda opts: module.run_command("%s %s" % (patch_bin, ' '.join(opts))) changed = False From 84eb895a06c8819ed5d8aea06672a69add2b9d37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Fri, 20 Mar 2015 17:19:15 +0100 Subject: [PATCH 065/720] [patch] Make sure the absolute patch file is passed to the patch command. According the patch(1) manpage: The --directory option change to the directory dir immediately, before doing anything else. Thus if file is not relative to dir and making file absolute ensure that patch will find it. --- files/patch.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/files/patch.py b/files/patch.py index 314a1bc37db..78bcefe6d31 100755 --- a/files/patch.py +++ b/files/patch.py @@ -145,6 +145,9 @@ def main(): module.fail_json(msg="patch command not found") patch_func = lambda opts: module.run_command("%s %s" % (patch_bin, ' '.join(opts))) + # patch need an absolute file name + p.src = os.path.abspath(p.src) + changed = False if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip): try: From 08702e44bf5dfb1cee66b0b8cebd6717f9a366fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Fri, 20 Mar 2015 17:36:33 +0100 Subject: [PATCH 066/720] [patch] Update documentation for src parameter. --- files/patch.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/files/patch.py b/files/patch.py index 78bcefe6d31..2f2894a6508 100755 --- a/files/patch.py +++ b/files/patch.py @@ -43,7 +43,9 @@ options: aliases: [ "originalfile" ] src: description: - - Path of the patch file as accepted by the GNU patch tool. + - Path of the patch file as accepted by the GNU patch tool. If + C(remote_src) is False, the patch source file is looked up from the + module's "files" directory. required: true aliases: [ "patchfile" ] remote_src: From f8d04bec1bbdfb0e61e6d3255b16b5bfe23b42f1 Mon Sep 17 00:00:00 2001 From: "Dustin C. Hatch" Date: Sun, 22 Mar 2015 22:16:37 -0500 Subject: [PATCH 067/720] system/lvol: Suppress prompts from lvcreate Occasionally, `lvcreate` will prompt on stdin for confirmation. In particular, this may happen when the volume is being created close to the location on disk where another volume existed previously. When this happens, Ansible will hang indefinitely with no indication of the problem. To work prevent this problem, the `--yes` command-line argument can be passed to `lvcreate`, which will instruct it not to prompt. Signed-off-by: Dustin C. 
Hatch --- system/lvol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index d9be9e7dc70..b14fd33c8e4 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -187,7 +187,7 @@ def main(): changed = True else: lvcreate_cmd = module.get_bin_path("lvcreate", required=True) - rc, _, err = module.run_command("%s -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) + rc, _, err = module.run_command("%s --yes -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) if rc == 0: changed = True else: From 2f188600a8239ddd31407cae01b76579280d627e Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Thu, 19 Mar 2015 08:45:28 -0500 Subject: [PATCH 068/720] Added overlayfs backend type to the lxc_container module This commit adds the overlayfs type to the lxc_container module. In Adding the overlayfs type the commit adds the ability to clone a container. While cloning is not locked down to only the overlayfs container backend it is of particular interest when using the overlayfs backend as it provides for amazingly fast snapshots. Changes to the resource types and documentation have been added on how the new backend type can be used along with the clone operation. This PR addresses a question asked on the original merged pull request for overlayfs support which came from @fghaas on PR "https://github.com/ansible/ansible-modules-extras/pull/123". The overlayfs archive function is a first class function and will allow for the containers to be backed-up using all methods which brings support up to that of all other storage backends. --- cloud/lxc/lxc_container.py | 315 +++++++++++++++++++++++++++++++------ 1 file changed, 263 insertions(+), 52 deletions(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 1ae67bf23c6..7031f774714 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -38,6 +38,7 @@ options: - lvm - loop - btrfs + - overlayfs description: - Backend storage type for the container. required: false @@ -112,6 +113,20 @@ options: - Set the log level for a container where *container_log* was set. required: false default: INFO + clone_name: + description: + - Name of the new cloned server. This is only used when state is + clone. + required: false + clone_snapshot: + choices: + - true + - false + description: + - Create a snapshot a container when cloning. This is not supported + by all container storage backends. Enabling this may fail if the + backing store does not support snapshots. + default: false archive: choices: - true @@ -141,8 +156,12 @@ options: - restarted - absent - frozen + - clone description: - - Start a container right after it's created. + - Define the state of a container. If you use clone the container + will be stopped while the clone operation is happening and upon + completion of the clone the original container state will be + restored. required: false default: started container_config: @@ -295,6 +314,47 @@ EXAMPLES = """ archive: true archive_path: /opt/archives +- name: Create an overlayfs container + lxc_container: + name: test-container-overlayfs + container_log: true + template: ubuntu + state: started + backing_store: overlayfs + template_options: --release trusty + +- name: Clone a container + lxc_container: + name: test-container-overlayfs + clone_name: test-container-clone + state: clone + +- name: Clone a container using snapshot. 
+ lxc_container: + name: test-container-overlayfs + clone_name: test-container-overlayfs-clone + backing_store: overlayfs + clone_snapshot: true + state: clone + +- name: Create a new container and clone it + lxc_container: + name: test-container-new-overlayfs + clone_name: test-container-new-overlayfs-clone + backing_store: overlayfs + clone_snapshot: true + state: clone + +- name: Create a new container, clone it, and archive + lxc_container: + name: test-container-new-overlayfs + clone_name: test-container-new-overlayfs-clone + backing_store: overlayfs + clone_snapshot: true + state: clone + archive: true + archive_compression: gzip + - name: Destroy a container. lxc_container: name: "{{ item }}" @@ -305,6 +365,9 @@ EXAMPLES = """ - test-container-frozen - test-container-lvm - test-container-config + - test-container-overlayfs + - test-container-clone + - test-container-overlayfs-clone """ @@ -351,6 +414,15 @@ LXC_COMMAND_MAP = { 'directory': '--dir', 'zfs_root': '--zfsroot' } + }, + 'clone': { + 'variables': { + 'backing_store': '--backingstore', + 'lxc_path': '--lxcpath', + 'fs_size': '--fssize', + 'name': '--orig', + 'clone_name': '--new' + } } } @@ -369,6 +441,9 @@ LXC_BACKING_STORE = { ], 'loop': [ 'lv_name', 'vg_name', 'thinpool', 'zfs_root' + ], + 'overlayfs': [ + 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root' ] } @@ -388,7 +463,8 @@ LXC_ANSIBLE_STATES = { 'stopped': '_stopped', 'restarted': '_restarted', 'absent': '_destroyed', - 'frozen': '_frozen' + 'frozen': '_frozen', + 'clone': '_clone' } @@ -502,15 +578,15 @@ class LxcContainerManagement(object): return num @staticmethod - def _container_exists(name): + def _container_exists(container_name): """Check if a container exists. - :param name: Name of the container. + :param container_name: Name of the container. :type: ``str`` :returns: True or False if the container is found. :rtype: ``bol`` """ - if [i for i in lxc.list_containers() if i == name]: + if [i for i in lxc.list_containers() if i == container_name]: return True else: return False @@ -543,6 +619,7 @@ class LxcContainerManagement(object): """ # Remove incompatible storage backend options. + variables = variables.copy() for v in LXC_BACKING_STORE[self.module.params['backing_store']]: variables.pop(v, None) @@ -655,6 +732,83 @@ class LxcContainerManagement(object): self._container_startup() self.container.freeze() + def _clone(self, count=0): + """Clone a new LXC container from an existing container. + + This method will clone an existing container to a new container using + the `clone_name` variable as the new container name. The method will + create a container if the container `name` does not exist. + + Note that cloning a container will ensure that the original container + is "stopped" before the clone can be done. Because this operation can + require a state change the method will return the original container + to its prior state upon completion of the clone. + + Once the clone is complete the new container will be left in a stopped + state. 
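        Roughly, the command assembled from LXC_COMMAND_MAP['clone'] looks like
            lxc-clone --orig <name> --new <clone_name> [--backingstore <backing_store>] [--lxcpath <lxc_path>] [--fssize <fs_size>] [--snapshot]
        with --snapshot appended only when `clone_snapshot` is enabled (an
        illustrative summary of the code below, not an exhaustive option list).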
+ """ + + self.check_count(count=count, method='clone') + if self._container_exists(container_name=self.container_name): + # Ensure that the state of the original container is stopped + container_state = self._get_state() + if container_state != 'stopped': + self.state_change = True + self.container.stop() + + build_command = [ + self.module.get_bin_path('lxc-clone', True), + ] + + build_command = self._add_variables( + variables_dict=self._get_vars( + variables=LXC_COMMAND_MAP['clone']['variables'] + ), + build_command=build_command + ) + + # Load logging for the instance when creating it. + if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE: + build_command.append('--snapshot') + + rc, return_data, err = self._run_command(build_command) + if rc != 0: + message = "Failed executing lxc-clone." + self.failure( + err=err, rc=rc, msg=message, command=' '.join( + build_command + ) + ) + else: + self.state_change = True + # Restore the original state of the origin container if it was + # not in a stopped state. + if container_state == 'running': + self.container.start() + elif container_state == 'frozen': + self.container.start() + self.container.freeze() + + # Change the container name context to the new cloned container + # This enforces that the state of the new cloned container will be + # "stopped". + self.state = 'stopped' + self.container_name = self.module.params['clone_name'] + self.container = self.get_container_bind() + + # Return data + self._execute_command() + + # Perform any configuration updates + self._config() + + # Check if the container needs to have an archive created. + self._check_archive() + else: + self._create() + count += 1 + self._clone(count) + def _create(self): """Create a new LXC container. @@ -709,9 +863,9 @@ class LxcContainerManagement(object): rc, return_data, err = self._run_command(build_command) if rc != 0: - msg = "Failed executing lxc-create." + message = "Failed executing lxc-create." self.failure( - err=err, rc=rc, msg=msg, command=' '.join(build_command) + err=err, rc=rc, msg=message, command=' '.join(build_command) ) else: self.state_change = True @@ -751,7 +905,7 @@ class LxcContainerManagement(object): :rtype: ``str`` """ - if self._container_exists(name=self.container_name): + if self._container_exists(container_name=self.container_name): return str(self.container.state).lower() else: return str('absent') @@ -816,7 +970,7 @@ class LxcContainerManagement(object): """ for _ in xrange(timeout): - if not self._container_exists(name=self.container_name): + if not self._container_exists(container_name=self.container_name): break # Check if the container needs to have an archive created. 
@@ -852,7 +1006,7 @@ class LxcContainerManagement(object): """ self.check_count(count=count, method='frozen') - if self._container_exists(name=self.container_name): + if self._container_exists(container_name=self.container_name): self._execute_command() # Perform any configuration updates @@ -886,7 +1040,7 @@ class LxcContainerManagement(object): """ self.check_count(count=count, method='restart') - if self._container_exists(name=self.container_name): + if self._container_exists(container_name=self.container_name): self._execute_command() # Perform any configuration updates @@ -913,7 +1067,7 @@ class LxcContainerManagement(object): """ self.check_count(count=count, method='stop') - if self._container_exists(name=self.container_name): + if self._container_exists(container_name=self.container_name): self._execute_command() # Perform any configuration updates @@ -940,7 +1094,7 @@ class LxcContainerManagement(object): """ self.check_count(count=count, method='start') - if self._container_exists(name=self.container_name): + if self._container_exists(container_name=self.container_name): container_state = self._get_state() if container_state == 'running': pass @@ -1007,18 +1161,18 @@ class LxcContainerManagement(object): all_lvms = [i.split() for i in stdout.splitlines()][1:] return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg] - def _get_vg_free_pe(self, name): + def _get_vg_free_pe(self, vg_name): """Return the available size of a given VG. - :param name: Name of volume. - :type name: ``str`` + :param vg_name: Name of volume. + :type vg_name: ``str`` :returns: size and measurement of an LV :type: ``tuple`` """ build_command = [ 'vgdisplay', - name, + vg_name, '--units', 'g' ] @@ -1027,7 +1181,7 @@ class LxcContainerManagement(object): self.failure( err=err, rc=rc, - msg='failed to read vg %s' % name, + msg='failed to read vg %s' % vg_name, command=' '.join(build_command) ) @@ -1036,17 +1190,17 @@ class LxcContainerManagement(object): _free_pe = free_pe[0].split() return float(_free_pe[-2]), _free_pe[-1] - def _get_lv_size(self, name): + def _get_lv_size(self, lv_name): """Return the available size of a given LV. - :param name: Name of volume. - :type name: ``str`` + :param lv_name: Name of volume. + :type lv_name: ``str`` :returns: size and measurement of an LV :type: ``tuple`` """ vg = self._get_lxc_vg() - lv = os.path.join(vg, name) + lv = os.path.join(vg, lv_name) build_command = [ 'lvdisplay', lv, @@ -1080,7 +1234,7 @@ class LxcContainerManagement(object): """ vg = self._get_lxc_vg() - free_space, messurement = self._get_vg_free_pe(name=vg) + free_space, messurement = self._get_vg_free_pe(vg_name=vg) if free_space < float(snapshot_size_gb): message = ( @@ -1183,25 +1337,25 @@ class LxcContainerManagement(object): return archive_name - def _lvm_lv_remove(self, name): + def _lvm_lv_remove(self, lv_name): """Remove an LV. 
- :param name: The name of the logical volume - :type name: ``str`` + :param lv_name: The name of the logical volume + :type lv_name: ``str`` """ vg = self._get_lxc_vg() build_command = [ self.module.get_bin_path('lvremove', True), "-f", - "%s/%s" % (vg, name), + "%s/%s" % (vg, lv_name), ] rc, stdout, err = self._run_command(build_command) if rc != 0: self.failure( err=err, rc=rc, - msg='Failed to remove LVM LV %s/%s' % (vg, name), + msg='Failed to remove LVM LV %s/%s' % (vg, lv_name), command=' '.join(build_command) ) @@ -1213,31 +1367,71 @@ class LxcContainerManagement(object): :param temp_dir: path to the temporary local working directory :type temp_dir: ``str`` """ + # This loop is created to support overlayfs archives. This should + # squash all of the layers into a single archive. + fs_paths = container_path.split(':') + if 'overlayfs' in fs_paths: + fs_paths.pop(fs_paths.index('overlayfs')) + + for fs_path in fs_paths: + # Set the path to the container data + fs_path = os.path.dirname(fs_path) + + # Run the sync command + build_command = [ + self.module.get_bin_path('rsync', True), + '-aHAX', + fs_path, + temp_dir + ] + rc, stdout, err = self._run_command( + build_command, + unsafe_shell=True + ) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to perform archive', + command=' '.join(build_command) + ) + + def _unmount(self, mount_point): + """Unmount a file system. + + :param mount_point: path on the file system that is mounted. + :type mount_point: ``str`` + """ build_command = [ - self.module.get_bin_path('rsync', True), - '-aHAX', - container_path, - temp_dir + self.module.get_bin_path('umount', True), + mount_point, ] - rc, stdout, err = self._run_command(build_command, unsafe_shell=True) + rc, stdout, err = self._run_command(build_command) if rc != 0: self.failure( err=err, rc=rc, - msg='failed to perform archive', + msg='failed to unmount [ %s ]' % mount_point, command=' '.join(build_command) ) - def _unmount(self, mount_point): - """Unmount a file system. + def _overlayfs_mount(self, lowerdir, upperdir, mount_point): + """mount an lv. + :param lowerdir: name/path of the lower directory + :type lowerdir: ``str`` + :param upperdir: name/path of the upper directory + :type upperdir: ``str`` :param mount_point: path on the file system that is mounted. 
:type mount_point: ``str`` """ build_command = [ - self.module.get_bin_path('umount', True), + self.module.get_bin_path('mount', True), + '-t overlayfs', + '-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir), + 'overlayfs', mount_point, ] rc, stdout, err = self._run_command(build_command) @@ -1245,8 +1439,8 @@ class LxcContainerManagement(object): self.failure( err=err, rc=rc, - msg='failed to unmount [ %s ]' % mount_point, - command=' '.join(build_command) + msg='failed to mount overlayfs:%s:%s to %s -- Command: %s' + % (lowerdir, upperdir, mount_point, build_command) ) def _container_create_tar(self): @@ -1275,13 +1469,15 @@ class LxcContainerManagement(object): # Test if the containers rootfs is a block device block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev')) + + # Test if the container is using overlayfs + overlayfs_backed = lxc_rootfs.startswith('overlayfs') + mount_point = os.path.join(work_dir, 'rootfs') # Set the snapshot name if needed snapshot_name = '%s_lxc_snapshot' % self.container_name - # Set the path to the container data - container_path = os.path.dirname(lxc_rootfs) container_state = self._get_state() try: # Ensure the original container is stopped or frozen @@ -1292,7 +1488,7 @@ class LxcContainerManagement(object): self.container.stop() # Sync the container data from the container_path to work_dir - self._rsync_data(container_path, temp_dir) + self._rsync_data(lxc_rootfs, temp_dir) if block_backed: if snapshot_name not in self._lvm_lv_list(): @@ -1301,7 +1497,7 @@ class LxcContainerManagement(object): # Take snapshot size, measurement = self._get_lv_size( - name=self.container_name + lv_name=self.container_name ) self._lvm_snapshot_create( source_lv=self.container_name, @@ -1322,25 +1518,33 @@ class LxcContainerManagement(object): ' up old snapshot of containers before continuing.' % snapshot_name ) - - # Restore original state of container - if container_state == 'running': - if self._get_state() == 'frozen': - self.container.unfreeze() - else: - self.container.start() + elif overlayfs_backed: + lowerdir, upperdir = lxc_rootfs.split(':')[1:] + self._overlayfs_mount( + lowerdir=lowerdir, + upperdir=upperdir, + mount_point=mount_point + ) # Set the state as changed and set a new fact self.state_change = True return self._create_tar(source_dir=work_dir) finally: - if block_backed: + if block_backed or overlayfs_backed: # unmount snapshot self._unmount(mount_point) + if block_backed: # Remove snapshot self._lvm_lv_remove(snapshot_name) + # Restore original state of container + if container_state == 'running': + if self._get_state() == 'frozen': + self.container.unfreeze() + else: + self.container.start() + # Remove tmpdir shutil.rmtree(temp_dir) @@ -1450,6 +1654,14 @@ def main(): choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i], default='INFO' ), + clone_name=dict( + type='str', + required=False + ), + clone_snapshot=dict( + choices=BOOLEANS, + default='false' + ), archive=dict( choices=BOOLEANS, default='false' @@ -1477,4 +1689,3 @@ def main(): # import module bits from ansible.module_utils.basic import * main() - From 513724c0a5ba1aaef30d64cbd1afd713a9f550ff Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Mon, 23 Mar 2015 14:26:11 -0700 Subject: [PATCH 069/720] Add `validate_certs` param to bigip_* modules Ignoring SSL cert verification may be necessary when testing with a server that has a self-signed certificate. 
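A minimal standalone sketch of the PEP 476 opt-out that the new disable_ssl_cert_validation() helper in each of these modules relies on (illustrative only, assuming Python 2.7.9+; it is not part of the patch itself):

import ssl

def disable_ssl_cert_validation():
    # Replace the process-wide default HTTPS context with one that skips
    # certificate verification (https://www.python.org/dev/peps/pep-0476/#id29).
    ssl._create_default_https_context = ssl._create_unverified_context

if __name__ == '__main__':
    disable_ssl_cert_validation()
    # Contexts built from the default factory now carry CERT_NONE, so later
    # HTTPS clients (urllib2, and therefore bigsuds) stop verifying certificates.
    print ssl._create_default_https_context().verify_mode == ssl.CERT_NONE  # True

In the modules this swap only happens when the new validate_certs parameter is explicitly set to 'no'.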
See https://github.com/ansible/ansible-modules-extras/pull/288#issuecomment-85196736 --- network/f5/bigip_facts.py | 19 +++++++++++++++++++ network/f5/bigip_monitor_http.py | 21 +++++++++++++++++++++ network/f5/bigip_monitor_tcp.py | 21 +++++++++++++++++++++ network/f5/bigip_node.py | 19 +++++++++++++++++++ network/f5/bigip_pool.py | 19 +++++++++++++++++++ network/f5/bigip_pool_member.py | 19 +++++++++++++++++++ 6 files changed, 118 insertions(+) diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 99a1e31de68..d5f63695a61 100755 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -56,6 +56,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.9.1 session: description: - BIG-IP session support; may be useful to avoid concurrency @@ -1566,6 +1574,12 @@ def generate_software_list(f5): software_list = software.get_all_software_status() return software_list +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def main(): module = AnsibleModule( @@ -1573,6 +1587,7 @@ def main(): server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), + validate_certs = dict(default='yes', type='bool'), session = dict(type='bool', default=False), include = dict(type='list', required=True), filter = dict(type='str', required=False), @@ -1585,6 +1600,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] session = module.params['session'] fact_filter = module.params['filter'] if fact_filter: @@ -1601,6 +1617,9 @@ def main(): if not all(include_test): module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include))) + if not validate_certs: + disable_ssl_cert_validation() + try: facts = {} diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py index 62823f86579..dd20fb04d74 100644 --- a/network/f5/bigip_monitor_http.py +++ b/network/f5/bigip_monitor_http.py @@ -51,6 +51,14 @@ options: - BIG-IP password required: true default: null + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.9.1 state: description: - Monitor state @@ -177,6 +185,14 @@ def bigip_api(bigip, user, password): return api +def disable_ssl_cert_validation(): + + # You probably only want to do this for testing and never in production. 
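    # Note: this swaps the process-wide default HTTPS context, so every HTTPS
    # connection this module process opens afterwards (including the bigsuds
    # SOAP calls) skips certificate verification.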
+ # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + + def check_monitor_exists(module, api, monitor, parent): # hack to determine if monitor exists @@ -311,6 +327,7 @@ def main(): server = dict(required=True), user = dict(required=True), password = dict(required=True), + validate_certs = dict(default='yes', type='bool'), partition = dict(default='Common'), state = dict(default='present', choices=['present', 'absent']), name = dict(required=True), @@ -331,6 +348,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] partition = module.params['partition'] parent_partition = module.params['parent_partition'] state = module.params['state'] @@ -348,6 +366,9 @@ def main(): # end monitor specific stuff + if not validate_certs: + disable_ssl_cert_validation() + if not bigsuds_found: module.fail_json(msg="the python bigsuds module is required") api = bigip_api(server, user, password) diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py index 8b89a0c6113..78a51f2529b 100644 --- a/network/f5/bigip_monitor_tcp.py +++ b/network/f5/bigip_monitor_tcp.py @@ -49,6 +49,14 @@ options: - BIG-IP password required: true default: null + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.9.1 state: description: - Monitor state @@ -196,6 +204,14 @@ def bigip_api(bigip, user, password): return api +def disable_ssl_cert_validation(): + + # You probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + + def check_monitor_exists(module, api, monitor, parent): # hack to determine if monitor exists @@ -331,6 +347,7 @@ def main(): server = dict(required=True), user = dict(required=True), password = dict(required=True), + validate_certs = dict(default='yes', type='bool'), partition = dict(default='Common'), state = dict(default='present', choices=['present', 'absent']), name = dict(required=True), @@ -351,6 +368,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] partition = module.params['partition'] parent_partition = module.params['parent_partition'] state = module.params['state'] @@ -372,6 +390,9 @@ def main(): # end monitor specific stuff + if not validate_certs: + disable_ssl_cert_validation() + if not bigsuds_found: module.fail_json(msg="the python bigsuds module is required") api = bigip_api(server, user, password) diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index 68b6a2b52f1..c45a7f12d5c 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -54,6 +54,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.9.1 state: description: - Pool member state @@ -154,6 +162,12 @@ def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) return api +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def node_exists(api, address): # hack to determine if node exists result = False @@ -212,6 +226,7 @@ def main(): server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), + validate_certs = dict(default='yes', type='bool'), state = dict(type='str', default='present', choices=['present', 'absent']), partition = dict(type='str', default='Common'), name = dict(type='str', required=True), @@ -227,6 +242,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] state = module.params['state'] partition = module.params['partition'] host = module.params['host'] @@ -234,6 +250,9 @@ def main(): address = "/%s/%s" % (partition, name) description = module.params['description'] + if not validate_certs: + disable_ssl_cert_validation() + if state == 'absent' and host is not None: module.fail_json(msg="host parameter invalid when state=absent") diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py index 48d03b9f1cb..e7ddce6d391 100644 --- a/network/f5/bigip_pool.py +++ b/network/f5/bigip_pool.py @@ -54,6 +54,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.9.1 state: description: - Pool/pool member state @@ -235,6 +243,12 @@ def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) return api +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. 
+ # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def pool_exists(api, pool): # hack to determine if pool exists result = False @@ -359,6 +373,7 @@ def main(): server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), + validate_certs = dict(default='yes', type='bool'), state = dict(type='str', default='present', choices=['present', 'absent']), name = dict(type='str', required=True, aliases=['pool']), partition = dict(type='str', default='Common'), @@ -380,6 +395,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] state = module.params['state'] name = module.params['name'] partition = module.params['partition'] @@ -407,6 +423,9 @@ def main(): address = "/%s/%s" % (partition, host) port = module.params['port'] + if not validate_certs: + disable_ssl_cert_validation() + # sanity check user supplied values if (host and not port) or (port and not host): diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index 5aef9f0ae98..6a00864056c 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -56,6 +56,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.9.1 state: description: - Pool member state @@ -189,6 +197,12 @@ def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) return api +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. 
+ # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def pool_exists(api, pool): # hack to determine if pool exists result = False @@ -282,6 +296,7 @@ def main(): server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), + validate_certs = dict(default='yes', type='bool'), state = dict(type='str', default='present', choices=['present', 'absent']), pool = dict(type='str', required=True), partition = dict(type='str', default='Common'), @@ -301,6 +316,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] state = module.params['state'] partition = module.params['partition'] pool = "/%s/%s" % (partition, module.params['pool']) @@ -312,6 +328,9 @@ def main(): address = "/%s/%s" % (partition, host) port = module.params['port'] + if not validate_certs: + disable_ssl_cert_validation() + # sanity check user supplied values if (host and not port) or (port and not host): From 09dfd42d50b3477cf78aaec05467c640f822a3bd Mon Sep 17 00:00:00 2001 From: Dariusz Owczarek Date: Mon, 29 Dec 2014 16:50:43 +0100 Subject: [PATCH 070/720] new vertica modules --- database/vertica/__init__.py | 0 database/vertica/vertica_configuration.py | 198 +++++++++++ database/vertica/vertica_facts.py | 276 +++++++++++++++ database/vertica/vertica_role.py | 246 ++++++++++++++ database/vertica/vertica_schema.py | 320 ++++++++++++++++++ database/vertica/vertica_user.py | 388 ++++++++++++++++++++++ 6 files changed, 1428 insertions(+) create mode 100644 database/vertica/__init__.py create mode 100644 database/vertica/vertica_configuration.py create mode 100644 database/vertica/vertica_facts.py create mode 100644 database/vertica/vertica_role.py create mode 100644 database/vertica/vertica_schema.py create mode 100644 database/vertica/vertica_user.py diff --git a/database/vertica/__init__.py b/database/vertica/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/database/vertica/vertica_configuration.py b/database/vertica/vertica_configuration.py new file mode 100644 index 00000000000..6ee5ebe5f7f --- /dev/null +++ b/database/vertica/vertica_configuration.py @@ -0,0 +1,198 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_configuration +version_added: '1.0' +short_description: Updates Vertica configuration parameters. +description: + Updates Vertica configuration parameters. +options: + name: + description: + Name of the parameter to update. + required: true + default: null + value: + description: + Value of the parameter to be set. + required: true + default: null + db: + description: + Name of the Vertica database. 
+ required: false + default: null + cluster: + description: + Name of the Vertica cluster. + required: false + default: localhost + port: + description: + Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + The password used to authenticate with. + required: false + default: null +notes: + The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +Examples: + +- name: updating load_balance_policy + vertica_configuration: name=failovertostandbyafter value='8 hours' +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_configuration_facts(cursor, parameter_name=''): + facts = {} + cursor.execute(""" + select c.parameter_name, c.current_value, c.default_value + from configuration_parameters c + where c.node_name = 'ALL' + and (? = '' or c.parameter_name ilike ?) 
+ """, parameter_name, parameter_name) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.parameter_name.lower()] = { + 'parameter_name': row.parameter_name, + 'current_value': row.current_value, + 'default_value': row.default_value} + return facts + +def check(configuration_facts, parameter_name, current_value): + parameter_key = parameter_name.lower() + if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): + return False + return True + +def present(configuration_facts, cursor, parameter_name, current_value): + parameter_key = parameter_name.lower() + changed = False + if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): + cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value)) + changed = True + if changed: + configuration_facts.update(get_configuration_facts(cursor, parameter_name)) + return changed + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + parameter=dict(required=True, aliases=['name']), + value=dict(default=None), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + parameter_name = module.params['parameter'] + current_value = module.params['value'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + configuration_facts = get_configuration_facts(cursor) + if module.check_mode: + changed = not check(configuration_facts, parameter_name, current_value) + else: + try: + changed = present(configuration_facts, cursor, parameter_name, current_value) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_facts.py b/database/vertica/vertica_facts.py new file mode 100644 index 00000000000..2334cbaa227 --- /dev/null +++ b/database/vertica/vertica_facts.py @@ -0,0 +1,276 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_facts +version_added: '1.0' +short_description: Gathers Vertica database facts. +description: + Gathers Vertica database facts. +options: + cluster: + description: + Name of the cluster running the schema. + required: false + default: localhost + port: + description: + Database port to connect to. + required: false + default: 5433 + db: + description: + Name of the database running the schema. + required: false + default: null + login_user: + description: + The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + The password used to authenticate with. + required: false + default: null +notes: + The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +- name: gathering vertica facts + vertica_facts: db=db_name +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +# module specific functions + +def get_schema_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select schema_name, schema_owner, create_time + from schemata + where not is_system_schema and schema_name not in ('public') + and (? = '' or schema_name ilike ?) + """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.schema_name.lower()] = { + 'name': row.schema_name, + 'owner': row.schema_owner, + 'create_time': str(row.create_time), + 'usage_roles': [], + 'create_roles': []} + cursor.execute(""" + select g.object_name as schema_name, r.name as role_name, + lower(g.privileges_description) privileges_description + from roles r join grants g + on g.grantee = r.name and g.object_type='SCHEMA' + and g.privileges_description like '%USAGE%' + and g.grantee not in ('public', 'dbadmin') + and (? = '' or g.object_name ilike ?) 
+ """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + schema_key = row.schema_name.lower() + if 'create' in row.privileges_description: + facts[schema_key]['create_roles'].append(row.role_name) + else: + facts[schema_key]['usage_roles'].append(row.role_name) + return facts + +def get_user_facts(cursor, user=''): + facts = {} + cursor.execute(""" + select u.user_name, u.is_locked, u.lock_time, + p.password, p.acctexpired as is_expired, + u.profile_name, u.resource_pool, + u.all_roles, u.default_roles + from users u join password_auditor p on p.user_id = u.user_id + where not u.is_super_user + and (? = '' or u.user_name ilike ?) + """, user, user) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + user_key = row.user_name.lower() + facts[user_key] = { + 'name': row.user_name, + 'locked': str(row.is_locked), + 'password': row.password, + 'expired': str(row.is_expired), + 'profile': row.profile_name, + 'resource_pool': row.resource_pool, + 'roles': [], + 'default_roles': []} + if row.is_locked: + facts[user_key]['locked_time'] = str(row.lock_time) + if row.all_roles: + facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + if row.default_roles: + facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + return facts + +def get_role_facts(cursor, role=''): + facts = {} + cursor.execute(""" + select r.name, r.assigned_roles + from roles r + where (? = '' or r.name ilike ?) + """, role, role) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + role_key = row.name.lower() + facts[role_key] = { + 'name': row.name, + 'assigned_roles': []} + if row.assigned_roles: + facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + return facts + +def get_configuration_facts(cursor, parameter=''): + facts = {} + cursor.execute(""" + select c.parameter_name, c.current_value, c.default_value + from configuration_parameters c + where c.node_name = 'ALL' + and (? = '' or c.parameter_name ilike ?) 
+ """, parameter, parameter) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.parameter_name.lower()] = { + 'parameter_name': row.parameter_name, + 'current_value': row.current_value, + 'default_value': row.default_value} + return facts + +def get_node_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select node_name, node_address, export_address, node_state, node_type, + catalog_path + from nodes + """) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.node_address] = { + 'node_name': row.node_name, + 'export_address': row.export_address, + 'node_state': row.node_state, + 'node_type': row.node_type, + 'catalog_path': row.catalog_path} + return facts + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + cluster=dict(default='localhost'), + port=dict(default='5433'), + db=dict(default=None), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + schema_facts = get_schema_facts(cursor) + user_facts = get_user_facts(cursor) + role_facts = get_role_facts(cursor) + configuration_facts = get_configuration_facts(cursor) + node_facts = get_node_facts(cursor) + module.exit_json(changed=False, + ansible_facts={'vertica_schemas': schema_facts, + 'vertica_users': user_facts, + 'vertica_roles': role_facts, + 'vertica_configuration': configuration_facts, + 'vertica_nodes': node_facts}) + except NotSupportedError, e: + module.fail_json(msg=str(e)) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_role.py b/database/vertica/vertica_role.py new file mode 100644 index 00000000000..dad6c5c3bc9 --- /dev/null +++ b/database/vertica/vertica_role.py @@ -0,0 +1,246 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_role +version_added: '1.0' +short_description: Adds or removes Vertica database roles and assigns roles to them. +description: + Adds or removes Vertica database role and, optionally, assign other roles. 
+options: + name: + description: + Name of the role to add or remove. + required: true + default: null + assigned_roles: + description: + Comma separated list of roles to assign to the role. + [Alias I(assigned_role)] + required: false + default: null + state: + description: + Whether to create C(present), drop C(absent) or lock C(locked) a role. + required: false + choices: ['present', 'absent'] + default: present + db: + description: + Name of the Vertica database. + required: false + default: null + cluster: + description: + Name of the Vertica cluster. + required: false + default: localhost + port: + description: + Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + The password used to authenticate with. + required: false + default: null +notes: + The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +Examples: + +- name: creating a new vertica role + vertica_role: name=role_name db=db_name state=present + +- name: creating a new vertica role with other role assigned + vertica_role: name=role_name assigned_role=other_role_name state=present +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_role_facts(cursor, role=''): + facts = {} + cursor.execute(""" + select r.name, r.assigned_roles + from roles r + where (? = '' or r.name ilike ?) 
+ """, role, role) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + role_key = row.name.lower() + facts[role_key] = { + 'name': row.name, + 'assigned_roles': []} + if row.assigned_roles: + facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + return facts + +def update_roles(role_facts, cursor, role, + existing, required): + for assigned_role in set(existing) - set(required): + cursor.execute("revoke {0} from {1}".format(assigned_role, role)) + for assigned_role in set(required) - set(existing): + cursor.execute("grant {0} to {1}".format(assigned_role, role)) + +def check(role_facts, role, assigned_roles): + role_key = role.lower() + if role_key not in role_facts: + return False + if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0: + return False + return True + +def present(role_facts, cursor, role, assigned_roles): + role_key = role.lower() + if role_key not in role_facts: + cursor.execute("create role {0}".format(role)) + update_roles(role_facts, cursor, role, [], assigned_roles) + role_facts.update(get_role_facts(cursor, role)) + return True + else: + changed = False + if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0: + update_roles(role_facts, cursor, role, + role_facts[role_key]['assigned_roles'], assigned_roles) + changed = True + if changed: + role_facts.update(get_role_facts(cursor, role)) + return changed + +def absent(role_facts, cursor, role, assigned_roles): + role_key = role.lower() + if role_key in role_facts: + update_roles(role_facts, cursor, role, + role_facts[role_key]['assigned_roles'], []) + cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name'])) + del role_facts[role_key] + return True + else: + return False + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + role=dict(required=True, aliases=['name']), + assigned_roles=dict(default=None, aliases=['assigned_role']), + state=dict(default='present', choices=['absent', 'present']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + role = module.params['role'] + assigned_roles = [] + if module.params['assigned_roles']: + assigned_roles = module.params['assigned_roles'].split(',') + assigned_roles = filter(None, assigned_roles) + state = module.params['state'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + role_facts = get_role_facts(cursor) + if module.check_mode: + changed = not check(role_facts, role, assigned_roles) + elif state == 'absent': + try: + changed = absent(role_facts, cursor, role, assigned_roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + elif state == 'present': + try: + changed = present(role_facts, cursor, role, assigned_roles) + 
except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_schema.py b/database/vertica/vertica_schema.py new file mode 100644 index 00000000000..7bc57a545f6 --- /dev/null +++ b/database/vertica/vertica_schema.py @@ -0,0 +1,320 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_schema +version_added: '1.0' +short_description: Adds or removes Vertica database schema and roles. +description: + Adds or removes Vertica database schema and, optionally, roles + with schema access privileges. + A schema will not be removed until all the objects have been dropped. + In such a situation, if the module tries to remove the schema it + will fail and only remove roles created for the schema if they have + no dependencies. +options: + name: + description: + Name of the schema to add or remove. + required: true + default: null + usage_roles: + description: + Comma separated list of roles to create and grant usage access to the schema. + [Alias I(usage_role)] + required: false + default: null + create_roles: + description: + Comma separated list of roles to create and grant usage and create access to the schema. + [Alias I(create_role)] + required: false + default: null + owner: + description: + Name of the user to set as owner of the schema. + required: false + default: null + state: + description: + Whether to create C(present), or drop C(absent) a schema. + required: false + default: present + choices: ['present', 'absent'] + db: + description: + Name of the Vertica database. + required: false + default: null + cluster: + description: + Name of the Vertica cluster. + required: false + default: localhost + port: + description: + Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + The password used to authenticate with. + required: false + default: null +notes: + The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. 
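    For illustration, a standalone connection using the same DSN-less connection
    string this module assembles in main() (host, database and credentials below
    are placeholders):

    import pyodbc

    dsn = (
        "Driver=Vertica;"
        "Server=localhost;"
        "Port=5433;"
        "Database=db_name;"
        "User=dbadmin;"
        "Password=;"
        "ConnectionLoadBalance=true"
    )
    db_conn = pyodbc.connect(dsn, autocommit=True)
    cursor = db_conn.cursor()
    cursor.execute("select version()")
    print cursor.fetchone()[0]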
+ Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +Examples: + +- name: creating a new vertica schema + vertica_schema: name=schema_name db=db_name state=present + +- name: creating a new schema with specific schema owner + vertica_schema: name=schema_name owner=dbowner db=db_name state=present + +- name: creating a new schema with roles + vertica_schema: + name=schema_name + create_roles=schema_name_all + usage_roles=schema_name_ro,schema_name_rw + db=db_name + state=present +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_schema_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select schema_name, schema_owner, create_time + from schemata + where not is_system_schema and schema_name not in ('public', 'TxtIndex') + and (? = '' or schema_name ilike ?) + """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.schema_name.lower()] = { + 'name': row.schema_name, + 'owner': row.schema_owner, + 'create_time': str(row.create_time), + 'usage_roles': [], + 'create_roles': []} + cursor.execute(""" + select g.object_name as schema_name, r.name as role_name, + lower(g.privileges_description) privileges_description + from roles r join grants g + on g.grantee_id = r.role_id and g.object_type='SCHEMA' + and g.privileges_description like '%USAGE%' + and g.grantee not in ('public', 'dbadmin') + and (? = '' or g.object_name ilike ?) 
+ """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + schema_key = row.schema_name.lower() + if 'create' in row.privileges_description: + facts[schema_key]['create_roles'].append(row.role_name) + else: + facts[schema_key]['usage_roles'].append(row.role_name) + return facts + +def update_roles(schema_facts, cursor, schema, + existing, required, + create_existing, create_required): + for role in set(existing + create_existing) - set(required + create_required): + cursor.execute("drop role {0} cascade".format(role)) + for role in set(create_existing) - set(create_required): + cursor.execute("revoke create on schema {0} from {1}".format(schema, role)) + for role in set(required + create_required) - set(existing + create_existing): + cursor.execute("create role {0}".format(role)) + cursor.execute("grant usage on schema {0} to {1}".format(schema, role)) + for role in set(create_required) - set(create_existing): + cursor.execute("grant create on schema {0} to {1}".format(schema, role)) + +def check(schema_facts, schema, usage_roles, create_roles, owner): + schema_key = schema.lower() + if schema_key not in schema_facts: + return False + if owner and owner.lower() == schema_facts[schema_key]['owner'].lower(): + return False + if cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0: + return False + if cmp(sorted(create_roles), sorted(schema_facts[schema_key]['create_roles'])) != 0: + return False + return True + +def present(schema_facts, cursor, schema, usage_roles, create_roles, owner): + schema_key = schema.lower() + if schema_key not in schema_facts: + query_fragments = ["create schema {0}".format(schema)] + if owner: + query_fragments.append("authorization {0}".format(owner)) + cursor.execute(' '.join(query_fragments)) + update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles) + schema_facts.update(get_schema_facts(cursor, schema)) + return True + else: + changed = False + if owner and owner.lower() != schema_facts[schema_key]['owner'].lower(): + raise NotSupportedError(( + "Changing schema owner is not supported. " + "Current owner: {0}." 
+ ).format(schema_facts[schema_key]['owner'])) + if cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0 or \ + cmp(sorted(create_roles), sorted(schema_facts[schema_key]['create_roles'])) != 0: + update_roles(schema_facts, cursor, schema, + schema_facts[schema_key]['usage_roles'], usage_roles, + schema_facts[schema_key]['create_roles'], create_roles) + changed = True + if changed: + schema_facts.update(get_schema_facts(cursor, schema)) + return changed + +def absent(schema_facts, cursor, schema, usage_roles, create_roles): + schema_key = schema.lower() + if schema_key in schema_facts: + update_roles(schema_facts, cursor, schema, + schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], []) + try: + cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name'])) + except pyodbc.Error: + raise CannotDropError("Dropping schema failed due to dependencies.") + del schema_facts[schema_key] + return True + else: + return False + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + schema=dict(required=True, aliases=['name']), + usage_roles=dict(default=None, aliases=['usage_role']), + create_roles=dict(default=None, aliases=['create_role']), + owner=dict(default=None), + state=dict(default='present', choices=['absent', 'present']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + schema = module.params['schema'] + usage_roles = [] + if module.params['usage_roles']: + usage_roles = module.params['usage_roles'].split(',') + usage_roles = filter(None, usage_roles) + create_roles = [] + if module.params['create_roles']: + create_roles = module.params['create_roles'].split(',') + create_roles = filter(None, create_roles) + owner = module.params['owner'] + state = module.params['state'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + schema_facts = get_schema_facts(cursor) + if module.check_mode: + changed = not check(schema_facts, schema, usage_roles, create_roles, owner) + elif state == 'absent': + try: + changed = absent(schema_facts, cursor, schema, usage_roles, create_roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + elif state == 'present': + try: + changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts}) + +# import ansible utilities 
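# (this wildcard import supplies the AnsibleModule class used in main())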
+from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_user.py b/database/vertica/vertica_user.py new file mode 100644 index 00000000000..82182301a69 --- /dev/null +++ b/database/vertica/vertica_user.py @@ -0,0 +1,388 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_user +version_added: '1.0' +short_description: Adds or removes Vertica database users and assigns roles. +description: + Adds or removes Vertica database user and, optionally, assigns roles. + A user will not be removed until all the dependencies have been dropped. + In such a situation, if the module tries to remove the user it + will fail and only remove roles granted to the user. +options: + name: + description: + Name of the user to add or remove. + required: true + default: null + profile: + description: + Sets the user's profile. + required: false + default: null + resource_pool: + description: + Sets the user's resource pool. + required: false + default: null + password: + description: + The user's password encrypted by the MD5 algorithm. + The password must be generated with the format C("md5" + md5[password + username]), + resulting in a total of 35 characters. An easy way to do this is by querying + the Vertica database with select 'md5'||md5(''). + required: false + default: null + expired: + description: + Sets the user's password expiration. + required: false + default: null + ldap: + description: + Set to true if users are authenticated via LDAP. + The user will be created with password expired and set to I($ldap$). + required: false + default: null + roles: + description: + Comma separated list of roles to assign to the user. + [Alias I(role)] + required: false + default: null + state: + description: + Whether to create C(present), drop C(absent) or lock C(locked) a user. + required: false + choices: ['present', 'absent', 'locked'] + default: present + db: + description: + Name of the Vertica database. + required: false + default: null + cluster: + description: + Name of the Vertica cluster. + required: false + default: localhost + port: + description: + Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + The password used to authenticate with. + required: false + default: null +notes: + The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. 
+ Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +Examples: + +- name: creating a new vertica user with password + vertica_user: name=user_name password=md5 db=db_name state=present + +- name: creating a new vertica user authenticated via ldap with roles assigned + vertica_user: + name=user_name + ldap=true + db=db_name + roles=schema_name_ro + state=present +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_user_facts(cursor, user=''): + facts = {} + cursor.execute(""" + select u.user_name, u.is_locked, u.lock_time, + p.password, p.acctexpired as is_expired, + u.profile_name, u.resource_pool, + u.all_roles, u.default_roles + from users u join password_auditor p on p.user_id = u.user_id + where not u.is_super_user + and (? = '' or u.user_name ilike ?) + """, user, user) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + user_key = row.user_name.lower() + facts[user_key] = { + 'name': row.user_name, + 'locked': str(row.is_locked), + 'password': row.password, + 'expired': str(row.is_expired), + 'profile': row.profile_name, + 'resource_pool': row.resource_pool, + 'roles': [], + 'default_roles': []} + if row.is_locked: + facts[user_key]['locked_time'] = str(row.lock_time) + if row.all_roles: + facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + if row.default_roles: + facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + return facts + +def update_roles(user_facts, cursor, user, + existing_all, existing_default, required): + del_roles = list(set(existing_all) - set(required)) + if del_roles: + cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user)) + new_roles = list(set(required) - set(existing_all)) + if new_roles: + cursor.execute("grant {0} to {1}".format(','.join(new_roles), user)) + if required: + cursor.execute("alter user {0} default role {1}".format(user, ','.join(required))) + +def check(user_facts, user, profile, resource_pool, + locked, password, expired, ldap, roles): + user_key = user.lower() + if user_key not in user_facts: + return False + if profile and profile != user_facts[user_key]['profile']: + return False + if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + return False + if locked != (user_facts[user_key]['locked'] == 'True'): + return False + if password and password != user_facts[user_key]['password']: + return False + if expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or \ + ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True'): + return False + if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \ + cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0): + return False + return True + +def present(user_facts, cursor, user, profile, resource_pool, + locked, password, expired, ldap, roles): + user_key = user.lower() + if user_key not in user_facts: + 
query_fragments = ["create user {0}".format(user)] + if locked: + query_fragments.append("account lock") + if password or ldap: + if password: + query_fragments.append("identified by '{0}'".format(password)) + else: + query_fragments.append("identified by '$ldap$'") + if expired or ldap: + query_fragments.append("password expire") + if profile: + query_fragments.append("profile {0}".format(profile)) + if resource_pool: + query_fragments.append("resource pool {0}".format(resource_pool)) + cursor.execute(' '.join(query_fragments)) + if resource_pool and resource_pool != 'general': + cursor.execute("grant usage on resource pool {0} to {1}".format( + resource_pool, user)) + update_roles(user_facts, cursor, user, [], [], roles) + user_facts.update(get_user_facts(cursor, user)) + return True + else: + changed = False + query_fragments = ["alter user {0}".format(user)] + if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'): + state = 'lock' if locked else 'unlock' + query_fragments.append("account {0}".format(state)) + changed = True + if password and password != user_facts[user_key]['password']: + query_fragments.append("identified by '{0}'".format(password)) + changed = True + if ldap: + if ldap != (user_facts[user_key]['expired'] == 'True'): + query_fragments.append("password expire") + changed = True + elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'): + if expired: + query_fragments.append("password expire") + changed = True + else: + raise NotSupportedError("Unexpiring user password is not supported.") + if profile and profile != user_facts[user_key]['profile']: + query_fragments.append("profile {0}".format(profile)) + changed = True + if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + query_fragments.append("resource pool {0}".format(resource_pool)) + if user_facts[user_key]['resource_pool'] != 'general': + cursor.execute("revoke usage on resource pool {0} from {1}".format( + user_facts[user_key]['resource_pool'], user)) + if resource_pool != 'general': + cursor.execute("grant usage on resource pool {0} to {1}".format( + resource_pool, user)) + changed = True + if changed: + cursor.execute(' '.join(query_fragments)) + if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \ + cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0): + update_roles(user_facts, cursor, user, + user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles) + changed = True + if changed: + user_facts.update(get_user_facts(cursor, user)) + return changed + +def absent(user_facts, cursor, user, roles): + user_key = user.lower() + if user_key in user_facts: + update_roles(user_facts, cursor, user, + user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], []) + try: + cursor.execute("drop user {0}".format(user_facts[user_key]['name'])) + except pyodbc.Error: + raise CannotDropError("Dropping user failed due to dependencies.") + del user_facts[user_key] + return True + else: + return False + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + user=dict(required=True, aliases=['name']), + profile=dict(default=None), + resource_pool=dict(default=None), + password=dict(default=None), + expired=dict(type='bool', default=None), + ldap=dict(type='bool', default=None), + roles=dict(default=None, aliases=['role']), + state=dict(default='present', choices=['absent', 'present', 'locked']), + db=dict(default=None), + 
cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + user = module.params['user'] + profile = module.params['profile'] + if profile: + profile = profile.lower() + resource_pool = module.params['resource_pool'] + if resource_pool: + resource_pool = resource_pool.lower() + password = module.params['password'] + expired = module.params['expired'] + ldap = module.params['ldap'] + roles = [] + if module.params['roles']: + roles = module.params['roles'].split(',') + roles = filter(None, roles) + state = module.params['state'] + if state == 'locked': + locked = True + else: + locked = False + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + user_facts = get_user_facts(cursor) + if module.check_mode: + changed = not check(user_facts, user, profile, resource_pool, + locked, password, expired, ldap, roles) + elif state == 'absent': + try: + changed = absent(user_facts, cursor, user, roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + elif state in ['present', 'locked']: + try: + changed = present(user_facts, cursor, user, profile, resource_pool, + locked, password, expired, ldap, roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() From cf9a243cd7cacc5433d3a9db0f2a3aed197464d2 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 17:28:37 -0400 Subject: [PATCH 071/720] added missing __init__.py --- cloud/google/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 cloud/google/__init__.py diff --git a/cloud/google/__init__.py b/cloud/google/__init__.py new file mode 100644 index 00000000000..e69de29bb2d From 4bf87a0ab5ee3ede8afe6ca55494e91368b8698b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 22:00:07 -0400 Subject: [PATCH 072/720] corrected 'version added' --- database/vertica/vertica_configuration.py | 2 +- database/vertica/vertica_facts.py | 2 +- database/vertica/vertica_role.py | 2 +- database/vertica/vertica_schema.py | 2 +- database/vertica/vertica_user.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/database/vertica/vertica_configuration.py b/database/vertica/vertica_configuration.py index 6ee5ebe5f7f..c7bdb1001d6 100644 --- a/database/vertica/vertica_configuration.py +++ b/database/vertica/vertica_configuration.py @@ -19,7 +19,7 @@ DOCUMENTATION = """ --- module: 
vertica_configuration -version_added: '1.0' +version_added: '2.0' short_description: Updates Vertica configuration parameters. description: Updates Vertica configuration parameters. diff --git a/database/vertica/vertica_facts.py b/database/vertica/vertica_facts.py index 2334cbaa227..4b963a4e377 100644 --- a/database/vertica/vertica_facts.py +++ b/database/vertica/vertica_facts.py @@ -19,7 +19,7 @@ DOCUMENTATION = """ --- module: vertica_facts -version_added: '1.0' +version_added: '2.0' short_description: Gathers Vertica database facts. description: Gathers Vertica database facts. diff --git a/database/vertica/vertica_role.py b/database/vertica/vertica_role.py index dad6c5c3bc9..825bb1b07e9 100644 --- a/database/vertica/vertica_role.py +++ b/database/vertica/vertica_role.py @@ -19,7 +19,7 @@ DOCUMENTATION = """ --- module: vertica_role -version_added: '1.0' +version_added: '2.0' short_description: Adds or removes Vertica database roles and assigns roles to them. description: Adds or removes Vertica database role and, optionally, assign other roles. diff --git a/database/vertica/vertica_schema.py b/database/vertica/vertica_schema.py index 7bc57a545f6..f3a75055d06 100644 --- a/database/vertica/vertica_schema.py +++ b/database/vertica/vertica_schema.py @@ -19,7 +19,7 @@ DOCUMENTATION = """ --- module: vertica_schema -version_added: '1.0' +version_added: '2.0' short_description: Adds or removes Vertica database schema and roles. description: Adds or removes Vertica database schema and, optionally, roles diff --git a/database/vertica/vertica_user.py b/database/vertica/vertica_user.py index 82182301a69..1d72deca617 100644 --- a/database/vertica/vertica_user.py +++ b/database/vertica/vertica_user.py @@ -19,7 +19,7 @@ DOCUMENTATION = """ --- module: vertica_user -version_added: '1.0' +version_added: '2.0' short_description: Adds or removes Vertica database users and assigns roles. description: Adds or removes Vertica database user and, optionally, assigns roles. 
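
The Vertica modules in the patches above all build the same ODBC connection string and hand it to pyodbc with autocommit enabled, and vertica_user additionally expects the password pre-hashed in the documented C("md5" + md5(password + username)) form. The following standalone sketch (not part of any patch) shows both pieces; the helper names vertica_md5_password and connect_to_vertica are illustrative only, and the cluster, port and user values are simply the module defaults.

    import hashlib
    import pyodbc

    def vertica_md5_password(password, username):
        # "md5" prefix + 32-character md5 hex digest of password + username,
        # i.e. the 35-character value the vertica_user documentation asks for.
        return 'md5' + hashlib.md5((password + username).encode('utf-8')).hexdigest()

    def connect_to_vertica(cluster='localhost', port='5433', db='',
                           login_user='dbadmin', login_password=''):
        # Same DSN layout the modules format before calling pyodbc.connect()
        # with autocommit enabled.
        dsn = (
            "Driver=Vertica;"
            "Server={0};"
            "Port={1};"
            "Database={2};"
            "User={3};"
            "Password={4};"
            "ConnectionLoadBalance=true"
        ).format(cluster, port, db, login_user, login_password)
        return pyodbc.connect(dsn, autocommit=True)

    if __name__ == '__main__':
        print(vertica_md5_password('secret', 'dbuser'))

Either piece can be tried from a shell first, then the resulting 35-character hash passed to vertica_user as the password parameter.
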
From 5293f452b5e0fe8cc29daa63c848e721c9729e33 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 22:04:44 -0400 Subject: [PATCH 073/720] corrected version added --- network/f5/bigip_facts.py | 2 +- network/f5/bigip_monitor_http.py | 2 +- network/f5/bigip_monitor_tcp.py | 2 +- network/f5/bigip_node.py | 2 +- network/f5/bigip_pool.py | 2 +- network/f5/bigip_pool_member.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 59a6a48aa5e..2c90b418ea2 100755 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -63,7 +63,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - version_added: 1.9.1 + version_added: 2.0 session: description: - BIG-IP session support; may be useful to avoid concurrency diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py index dd20fb04d74..d131eb71eee 100644 --- a/network/f5/bigip_monitor_http.py +++ b/network/f5/bigip_monitor_http.py @@ -58,7 +58,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - version_added: 1.9.1 + version_added: 2.0 state: description: - Monitor state diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py index 78a51f2529b..5cc00fe6b68 100644 --- a/network/f5/bigip_monitor_tcp.py +++ b/network/f5/bigip_monitor_tcp.py @@ -56,7 +56,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - version_added: 1.9.1 + version_added: 2.0 state: description: - Monitor state diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index c45a7f12d5c..f54fafdb64b 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -61,7 +61,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - version_added: 1.9.1 + version_added: 2.0 state: description: - Pool member state diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py index e7ddce6d391..425c1e97149 100644 --- a/network/f5/bigip_pool.py +++ b/network/f5/bigip_pool.py @@ -61,7 +61,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - version_added: 1.9.1 + version_added: 2.0 state: description: - Pool/pool member state diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index 6a00864056c..1304dfe33e5 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -63,7 +63,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - version_added: 1.9.1 + version_added: 2.0 state: description: - Pool member state From d7030e9604537eee524e452762a4d5e865efcba3 Mon Sep 17 00:00:00 2001 From: Phillip Holmes Date: Wed, 15 Oct 2014 21:14:10 -0500 Subject: [PATCH 074/720] added color bar option to Slack module MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This update will allow people to add a color bar at the front of a Slack notification using the default 3 colors by name Slack specify (good, warning, danger). If no color is specified, or the default is used (normal) then no bar will be added. Description and example also added in this update. 
Color bars are added by using the attachments json object inside the payload - this is a very simplistic implementation as using custom colors or adding titles or other formatting are not included in this update and if needed I’m sure somebody else can spend the time to add them later… Tested with ansible 1.7 --- notification/slack.py | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/notification/slack.py b/notification/slack.py index 1ae748247f9..2b1459c3d91 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -89,6 +89,16 @@ options: choices: - 'yes' - 'no' + color: + description: + - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message + required: false + default: 'normal' + choices: + - 'normal' + - 'good' + - 'warning' + - 'danger' """ EXAMPLES = """ @@ -111,14 +121,24 @@ EXAMPLES = """ link_names: 0 parse: 'none' +- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack + slack: + domain: future500.slack.com + token: thetokengeneratedbyslack + msg: "{{ inventory_hostname }} is alive!" + color: good + username: "" + icon_url: "" """ OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s' -def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse): - payload = dict(text=text) - +def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color): + if color == 'normal': + payload = dict(text=text) + else: + payload = dict(attachments=[dict(text=text, color=color)]) if channel is not None: payload['channel'] = channel if (channel[0] == '#') else '#'+channel if username is not None: @@ -161,8 +181,8 @@ def main(): icon_emoji = dict(type='str', default=None), link_names = dict(type='int', default=1, choices=[0,1]), parse = dict(type='str', default=None, choices=['none', 'full']), - validate_certs = dict(default='yes', type='bool'), + color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']) ) ) @@ -175,8 +195,9 @@ def main(): icon_emoji = module.params['icon_emoji'] link_names = module.params['link_names'] parse = module.params['parse'] + color = module.params['color'] - payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse) + payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color) do_notify_slack(module, domain, token, payload) module.exit_json(msg="OK") From 671317e0e105168738215bb79a71bdc1040741a0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 23:07:45 -0400 Subject: [PATCH 075/720] added version added to new color option --- notification/slack.py | 1 + 1 file changed, 1 insertion(+) diff --git a/notification/slack.py b/notification/slack.py index 2b1459c3d91..fc0e7403637 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -90,6 +90,7 @@ options: - 'yes' - 'no' color: + version_added: 2.0 description: - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message required: false From 8e1024ff3011416b7abb4a20aa80d135f55c99c3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 23:15:37 -0400 Subject: [PATCH 076/720] updated pushover version added --- 
notification/pushover | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/pushover b/notification/pushover index 7fd66333f54..8e9d2596d43 100644 --- a/notification/pushover +++ b/notification/pushover @@ -23,7 +23,7 @@ DOCUMENTATION = ''' --- module: pushover -version_added: "1.8" +version_added: "2.0" short_description: Send notifications via u(https://pushover.net) description: - Send notifications via pushover, to subscriber list of devices, and email From dd1508d572116520b2e7b175f34db09d40956637 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 23:55:00 -0400 Subject: [PATCH 077/720] moved to --- packaging/{ => language}/maven_artifact.py | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) rename packaging/{ => language}/maven_artifact.py (97%) diff --git a/packaging/maven_artifact.py b/packaging/language/maven_artifact.py similarity index 97% rename from packaging/maven_artifact.py rename to packaging/language/maven_artifact.py index 699d97a54c2..2aeb158625b 100755 --- a/packaging/maven_artifact.py +++ b/packaging/language/maven_artifact.py @@ -32,7 +32,7 @@ DOCUMENTATION = ''' --- module: maven_artifact short_description: Downloads an Artifact from a Maven Repository -version_added: "historical" +version_added: "2.0" description: - Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve - snapshots or release versions of the artifact and will resolve the latest available version if one is not @@ -45,54 +45,42 @@ options: group_id: description: The Maven groupId coordinate required: true - default: null - version_added: 0.0.1 artifact_id: description: The maven artifactId coordinate required: true - default: null - version_added: 0.0.1 version: description: The maven version coordinate required: false default: latest - version_added: 0.0.1 classifier: description: The maven classifier coordinate required: false default: null - version_added: 0.0.1 extension: description: The maven type/extension coordinate required: false default: jar - version_added: 0.0.1 repository_url: description: The URL of the Maven Repository to download from required: false default: http://repo1.maven.org/maven2 - version_added: 0.0.1 username: description: The username to authenticate as to the Maven Repository required: false default: null - version_added: 0.0.1 password: description: The passwor to authenticate with to the Maven Repository required: false default: null - version_added: 0.0.1 dest: description: The path where the artifact should be written to required: true default: false - version_added: 0.0.1 state: description: The desired state of the artifact required: true default: present choices: [present,absent] - version_added: 0.0.1 ''' EXAMPLES = ''' @@ -372,4 +360,4 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() \ No newline at end of file +main() From 2438b74ca83cf3e6c6d6b26cedd56b0d9889f0cd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 23:56:05 -0400 Subject: [PATCH 078/720] remove x bit --- packaging/language/maven_artifact.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 packaging/language/maven_artifact.py diff --git a/packaging/language/maven_artifact.py b/packaging/language/maven_artifact.py old mode 100755 new mode 100644 From 06939a8651f42db31751aae21ad34dacc6dd63c0 Mon Sep 17 00:00:00 2001 From: tedder Date: Fri, 24 Oct 2014 14:22:50 
-0700 Subject: [PATCH 079/720] add cloudtrail module Cloudtrail is the AWS auditing configuration. It's fairly simple, but also very important to configuration management/devops/security to ensure it remains enabled. That's why I created it as a module. --- cloud/cloudtrail.py | 227 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 227 insertions(+) create mode 100755 cloud/cloudtrail.py diff --git a/cloud/cloudtrail.py b/cloud/cloudtrail.py new file mode 100755 index 00000000000..de1656b6dd3 --- /dev/null +++ b/cloud/cloudtrail.py @@ -0,0 +1,227 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: cloudtrail +short_description: manage CloudTrail creation and deletion +description: + - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. This module has a dependency on python-boto >= 2.21. +version_added: "1.7.3" +author: Ted Timmons +requirements: ["boto"] +options: + state: + description: + - add or remove CloudTrail configuration. + required: true + choices: ['enabled', 'absent'] + name: + description: + - name for given CloudTrail configuration. + - This is a primary key and is used to identify the configuration. + s3_bucket_prefix: + description: + - bucket to place CloudTrail in. + - this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html) + - required when state=enabled. + required: false + s3_key_prefix: + description: + - prefix to keys in bucket. A trailing slash is not necessary and will be removed. + required: false + include_global_events: + description: + - record API calls from global services such as IAM and STS? + required: false + default: false + choices: ["true", "false"] + + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_secret_key', 'secret_key' ] + version_added: "1.5" + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_access_key', 'access_key' ] + version_added: "1.5" + region: + description: + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. 
+ required: false + aliases: ['aws_region', 'ec2_region'] + version_added: "1.5" + +extends_documentation_fragment: aws +""" + +EXAMPLES = """ + - name: enable cloudtrail + local_action: cloudtrail > + state=enabled name=main s3_bucket_name=ourbucket + s3_key_prefix=cloudtrail region=us-east-1 + + - name: enable cloudtrail with different configuration + local_action: cloudtrail > + state=enabled name=main s3_bucket_name=ourbucket2 + s3_key_prefix='' region=us-east-1 + + - name: remove cloudtrail + local_action: cloudtrail state=absent name=main region=us-east-1 +""" + +import time +import sys +import os +from collections import Counter + +try: + import boto + import boto.cloudtrail + from boto.regioninfo import RegionInfo +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + +class CloudTrailManager: + """Handles cloudtrail configuration""" + + def __init__(self, module, region=None, **aws_connect_params): + self.module = module + self.region = region + self.aws_connect_params = aws_connect_params + self.changed = False + + try: + self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params) + except boto.exception.NoAuthHandlerFound, e: + self.module.fail_json(msg=str(e)) + + def view_status(self, name): + return self.conn.get_trail_status(name) + + def view(self, name): + ret = self.conn.describe_trails(trail_name_list=[name]) + trailList = ret.get('trailList', []) + if len(trailList) == 1: + return trailList[0] + return None + + def exists(self, name=None): + ret = self.view(name) + if ret: + return True + return False + + def enable_logging(self, name): + '''Turn on logging for a cloudtrail that already exists. Throws Exception on error.''' + self.conn.start_logging(name) + + + def enable(self, **create_args): + return self.conn.create_trail(**create_args) + + def update(self, **create_args): + return self.conn.update_trail(**create_args) + + def delete(self, name): + '''Delete a given cloudtrial configuration. Throws Exception on error.''' + self.conn.delete_trail(name) + + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state={'required': True, 'choices': ['enabled', 'absent'] }, + name={'required': True, 'type': 'str' }, + s3_bucket_name={'required': False, 'type': 'str' }, + s3_key_prefix={'default':'', 'required': False, 'type': 'str' }, + include_global_events={'default':True, 'required': False, 'type': 'bool' }, + )) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + ec2_url, access_key, secret_key, region = get_ec2_creds(module) + aws_connect_params = dict(aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + + if module.params['state'] == 'enabled' and not module.params['s3_bucket_name']: + module.fail_json(msg="s3_bucket_name must be specified as a parameter when creating a cloudtrail") + + if not region: + module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") + + ct_name = module.params['name'] + s3_bucket_name = module.params['s3_bucket_name'] + # remove trailing slash from the key prefix, really messes up the key structure. 
+ s3_key_prefix = module.params['s3_key_prefix'].rstrip('/') + include_global_events = module.params['include_global_events'] + + #if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: + # module.fail_json(msg="ELBs are required for registration or viewing") + + cf_man = CloudTrailManager(module, region=region, **aws_connect_params) + + results = { 'changed': False } + if module.params['state'] == 'enabled': + results['exists'] = cf_man.exists(name=ct_name) + if results['exists']: + results['view'] = cf_man.view(ct_name) + # only update if the values have changed. + if results['view']['S3BucketName'] != s3_bucket_name or \ + results['view']['S3KeyPrefix'] != s3_key_prefix or \ + results['view']['IncludeGlobalServiceEvents'] != include_global_events: + if not module.check_mode: + results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events) + results['changed'] = True + else: + if not module.check_mode: + # doesn't exist. create it. + results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events) + results['changed'] = True + + # given cloudtrail should exist now. Enable the logging. + results['view_status'] = cf_man.view_status(ct_name) + results['was_logging_enabled'] = results['view_status'].get('IsLogging', False) + if not results['was_logging_enabled']: + if not module.check_mode: + cf_man.enable_logging(ct_name) + results['logging_enabled'] = True + results['changed'] = True + + # delete the cloudtrai + elif module.params['state'] == 'absent': + # check to see if it exists before deleting. + results['exists'] = cf_man.exists(name=ct_name) + if results['exists']: + # it exists, so we should delete it and mark changed. + if not module.check_mode: + cf_man.delete(ct_name) + results['changed'] = True + + module.exit_json(**results) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() From 61114cd08a506a4d0e9daebbf9f295fc921909dd Mon Sep 17 00:00:00 2001 From: tedder Date: Fri, 24 Oct 2014 14:41:47 -0700 Subject: [PATCH 080/720] Handful of changes after bcoca's code review: * update expected inclusion version * fix consistency on enabled/absent (now enabled/disabled) * safely import boto per now style of single-exit and proper JSON * use new `required_together` module style --- cloud/cloudtrail.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/cloud/cloudtrail.py b/cloud/cloudtrail.py index de1656b6dd3..777f1df846c 100755 --- a/cloud/cloudtrail.py +++ b/cloud/cloudtrail.py @@ -20,7 +20,7 @@ module: cloudtrail short_description: manage CloudTrail creation and deletion description: - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. This module has a dependency on python-boto >= 2.21. -version_added: "1.7.3" +version_added: "2.0" author: Ted Timmons requirements: ["boto"] options: @@ -28,7 +28,7 @@ options: description: - add or remove CloudTrail configuration. required: true - choices: ['enabled', 'absent'] + choices: ['enabled', 'disabled'] name: description: - name for given CloudTrail configuration. 
@@ -76,12 +76,12 @@ extends_documentation_fragment: aws EXAMPLES = """ - name: enable cloudtrail - local_action: cloudtrail > + local_action: cloudtrail state=enabled name=main s3_bucket_name=ourbucket s3_key_prefix=cloudtrail region=us-east-1 - name: enable cloudtrail with different configuration - local_action: cloudtrail > + local_action: cloudtrail state=enabled name=main s3_bucket_name=ourbucket2 s3_key_prefix='' region=us-east-1 @@ -94,13 +94,13 @@ import sys import os from collections import Counter +boto_import_failed = False try: import boto import boto.cloudtrail from boto.regioninfo import RegionInfo except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) + boto_import_failed = True class CloudTrailManager: """Handles cloudtrail configuration""" @@ -150,23 +150,25 @@ class CloudTrailManager: def main(): + + if not has_libcloud: + module.fail_json(msg='boto is required.') + argument_spec = ec2_argument_spec() argument_spec.update(dict( - state={'required': True, 'choices': ['enabled', 'absent'] }, + state={'required': True, 'choices': ['enabled', 'disabled'] }, name={'required': True, 'type': 'str' }, s3_bucket_name={'required': False, 'type': 'str' }, s3_key_prefix={'default':'', 'required': False, 'type': 'str' }, include_global_events={'default':True, 'required': False, 'type': 'bool' }, )) + required_together = ( ['state', 's3_bucket_name'] ) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together) ec2_url, access_key, secret_key, region = get_ec2_creds(module) aws_connect_params = dict(aws_access_key_id=access_key, aws_secret_access_key=secret_key) - if module.params['state'] == 'enabled' and not module.params['s3_bucket_name']: - module.fail_json(msg="s3_bucket_name must be specified as a parameter when creating a cloudtrail") - if not region: module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") @@ -209,7 +211,7 @@ def main(): results['changed'] = True # delete the cloudtrai - elif module.params['state'] == 'absent': + elif module.params['state'] == 'disabled': # check to see if it exists before deleting. results['exists'] = cf_man.exists(name=ct_name) if results['exists']: From 520f4102570936e9486921b58772ec2da6a7582e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 26 Mar 2015 01:15:34 -0400 Subject: [PATCH 081/720] moved cloudtrail to amazon subdir --- cloud/amazon/__init__.py | 0 cloud/{ => amazon}/cloudtrail.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 cloud/amazon/__init__.py rename cloud/{ => amazon}/cloudtrail.py (100%) diff --git a/cloud/amazon/__init__.py b/cloud/amazon/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/cloudtrail.py b/cloud/amazon/cloudtrail.py similarity index 100% rename from cloud/cloudtrail.py rename to cloud/amazon/cloudtrail.py From f86ce495c7e8b29c02b25ebfabc6bdb2adc5e3b6 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Fri, 17 Oct 2014 14:58:28 -0700 Subject: [PATCH 082/720] Add enabled/disabled support to bigip_node This allows one to enable or disable a node, useful for when doing maintenance on a node to prevent connections from being attempted to it. This will completely disable the node for any pool it might be in. 
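
The diff that follows implements this through the iControl LocalLB.NodeAddressV2 interface exposed by bigsuds. As a rough standalone illustration of the calls wrapped by the new set_node_disabled()/set_node_enabled() helpers (this sketch is not part of the patch; the host, credentials and node name are the placeholder values from the module's examples):

    import bigsuds

    # Placeholder connection details taken from the module examples.
    api = bigsuds.BIGIP(hostname='lb.mydomain.com', username='admin',
                        password='mysecret')
    node = 'mynodename'

    # Disable the node (what state=disabled does under the hood).
    api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[node],
                                                        states=['STATE_DISABLED'])

    # Session status comes back as SESSION_STATUS_ENABLED, SESSION_STATUS_DISABLED
    # or SESSION_STATUS_FORCED_DISABLED.
    print(api.LocalLB.NodeAddressV2.get_session_status(nodes=[node])[0])

    # Re-enable it (state=enabled).
    api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[node],
                                                        states=['STATE_ENABLED'])

The patch maps state=enabled/disabled onto these two calls, and treats a forced-disabled (offline) node as already disabled, so no change is reported in that case.
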
--- network/f5/bigip_node.py | 64 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index f54fafdb64b..ca212763881 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -67,7 +67,7 @@ options: - Pool member state required: true default: present - choices: ['present', 'absent'] + choices: ['present', 'absent', 'enabled', 'disabled'] aliases: [] partition: description: @@ -78,7 +78,7 @@ options: aliases: [] name: description: - - "Node name" + - "Node name. Required when state=enabled/disabled" required: false default: null choices: [] @@ -145,6 +145,11 @@ EXAMPLES = ''' partition=matthite name="{{ ansible_default_ipv4["address"] }}" + - name: Disable node + bigip_node: server=lb.mydomain.com user=admin password=mysecret + state=disabled name=mynodename + delegate_to: localhost + ''' try: @@ -158,6 +163,13 @@ else: # bigip_node module specific # +# map of state values +STATES={'enabled': 'STATE_ENABLED', + 'disabled': 'STATE_DISABLED'} +STATUSES={'enabled': 'SESSION_STATUS_ENABLED', + 'disabled': 'SESSION_STATUS_DISABLED', + 'offline': 'SESSION_STATUS_FORCED_DISABLED'} + def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) return api @@ -220,6 +232,25 @@ def set_node_description(api, name, description): def get_node_description(api, name): return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0] +def set_node_disabled(api, name): + set_node_session_enabled_state(api, name, STATES['disabled']) + result = True + desc = "" + return (result, desc) + +def set_node_enabled(api, name): + set_node_session_enabled_state(api, name, STATES['enabled']) + result = True + desc = "" + return (result, desc) + +def set_node_session_enabled_state(api, name, state): + api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name], + states=[state]) + +def get_node_session_status(api, name): + return api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0] + def main(): module = AnsibleModule( argument_spec = dict( @@ -227,7 +258,8 @@ def main(): user = dict(type='str', required=True), password = dict(type='str', required=True), validate_certs = dict(default='yes', type='bool'), - state = dict(type='str', default='present', choices=['present', 'absent']), + state = dict(type='str', default='present', + choices=['present', 'absent', 'disabled', 'enabled']), partition = dict(type='str', default='Common'), name = dict(type='str', required=True), host = dict(type='str', aliases=['address', 'ip']), @@ -302,6 +334,32 @@ def main(): set_node_description(api, address, description) result = {'changed': True} + elif state in ('disabled', 'enabled'): + if name is None: + module.fail_json(msg="name parameter required when " \ + "state=enabled/disabled") + if not module.check_mode: + if not node_exists(api, name): + module.fail_json(msg="node does not exist") + status = get_node_session_status(api, name) + if state == 'disabled': + if status not in (STATUSES['disabled'], STATUSES['offline']): + disabled, desc = set_node_disabled(api, name) + if not disabled: + module.fail_json(msg="unable to disable: %s" % desc) + else: + result = {'changed': True} + else: + if status != STATUSES['enabled']: + enabled, desc = set_node_enabled(api, name) + if not enabled: + module.fail_json(msg="unable to enable: %s" % desc) + else: + result = {'changed': True} + else: + # check-mode return value + result = {'changed': True} + except Exception, e: 
module.fail_json(msg="received exception: %s" % e) From 2b192c7f8e0c99830b2beabc977ace5963c5ebc1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 26 Mar 2015 11:36:21 -0700 Subject: [PATCH 083/720] remove non-ascii quotes from message string Fixes https://github.com/ansible/ansible/pull/8564 --- system/locale_gen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/locale_gen.py b/system/locale_gen.py index 5d53951cf18..c5943cd63a0 100644 --- a/system/locale_gen.py +++ b/system/locale_gen.py @@ -159,7 +159,7 @@ def main(): # Ubuntu created its own system to manage locales. ubuntuMode = True else: - module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package “locales” installed?") + module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?") else: # We found the common way to manage locales. ubuntuMode = False From 400166a655b304094005aace178d0fab1cfe9763 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 26 Mar 2015 11:42:08 -0700 Subject: [PATCH 084/720] Port is an integer so use arg_spec to enforce that. --- database/mysql/mysql_replication.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index 07d09602b6b..30811cdc924 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -239,7 +239,7 @@ def main(): login_user=dict(default=None), login_password=dict(default=None), login_host=dict(default="localhost"), - login_port=dict(default="3306"), + login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]), master_host=dict(default=None), @@ -304,10 +304,10 @@ def main(): try: if module.params["login_unix_socket"]: db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password) - elif module.params["login_port"] != "3306" and module.params["login_host"] == "localhost": + elif module.params["login_port"] != 3306 and module.params["login_host"] == "localhost": module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined") else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password) + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password) except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") try: From 3b54e7b00ec9d3c614a4821d15253dbc9166dc10 Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sat, 28 Mar 2015 00:07:01 -0700 Subject: [PATCH 085/720] add zabbix_host --- monitoring/zabbix_host.py | 458 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 458 insertions(+) create mode 100644 monitoring/zabbix_host.py diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py new file mode 100644 index 00000000000..0d3cc8e661f --- /dev/null +++ b/monitoring/zabbix_host.py @@ -0,0 +1,458 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013-2014, Epic Games, Inc. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: zabbix_host +short_description: Zabbix host creates/updates/deletes +description: + - When the host does not exists, a new host will be created, added to any host groups and linked to any templates. + - When the host already exists, the host group membership will be updated, along with the template links and interfaces. + - Delete a host from Zabbix if the host exists. +version_added: "1.9" +author: Tony Minfei Ding, Harrison Gu +requirements: + - zabbix-api python module +options: + server_url: + description: + - Url of Zabbix server, with protocol (http or https). + C(url) is an alias for C(server_url). + required: true + default: null + aliases: [ "url" ] + login_user: + description: + - Zabbix user name. + required: true + default: null + login_password: + description: + - Zabbix user password. + required: true + default: null + host_name: + description: + - Technical name of the host. + - If the host has already been added, the host name won't be updated. + required: true + host_groups: + description: + - List of host groups to add the host to. + required: false + link_templates: + description: + - List of templates to be linked to the host. + required: false + default: None + status: + description: + - Status and function of the host. + - Possible values are: enabled and disabled + required: false + default: "enabled" + state: + description: + - create/update or delete host. + - Possible values are: present and absent. If the host already exists, and the state is "present", just to update the host. + required: false + default: "present" + timeout: + description: + - The timeout of API request(seconds). + default: 10 + interfaces: + description: + - List of interfaces to be created for the host (see example). + - Available values are: dns, ip, main, port, type and useip. 
+ - Please review the interface documentation for more information on the supported properties: + - https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface + required: false +''' + +EXAMPLES = ''' +- name: Create a new host or update an existing host's info + local_action: + module: zabbix_host + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + host_groups: + - Example group1 + - Example group2 + link_templates: + - Example template1 + - Example template2 + status: enabled + state: present + interfaces: + - type: 1 + main: 1 + useip: 1 + ip: 10.xx.xx.xx + dns: "" + port: 10050 + - type: 4 + main: 1 + useip: 1 + ip: 10.xx.xx.xx + dns: "" + port: 12345 +''' + +import logging +import copy +from ansible.module_utils.basic import * + +try: + from zabbix_api import ZabbixAPI, ZabbixAPISubClass + + HAS_ZABBIX_API = True +except ImportError: + HAS_ZABBIX_API = False + + +# Extend the ZabbixAPI +# Since the zabbix-api python module too old (version 1.0, no higher version so far), +# it does not support the 'hostinterface' api calls, +# so we have to inherit the ZabbixAPI class to add 'hostinterface' support. +class ZabbixAPIExtends(ZabbixAPI): + hostinterface = None + + def __init__(self, server, timeout, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout) + self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs)) + + +class Host(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # exist host + def is_host_exist(self, host_name): + result = self._zapi.host.exists({'host': host_name}) + return result + + # check if host group exists + def check_host_group_exist(self, group_names): + for group_name in group_names: + result = self._zapi.hostgroup.exists({'name': group_name}) + if not result: + self._module.fail_json(msg="Hostgroup not found: %s" % group_name) + return True + + def get_template_ids(self, template_list): + template_ids = [] + if template_list is None or len(template_list) == 0: + return template_ids + for template in template_list: + template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}}) + if len(template_list) < 1: + self._module.fail_json(msg="Template not found: %s" % template) + else: + template_id = template_list[0]['templateid'] + template_ids.append(template_id) + return template_ids + + def add_host(self, host_name, group_ids, status, interfaces): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + host_list = self._zapi.host.create({'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}) + if len(host_list) >= 1: + return host_list['hostids'][0] + except Exception, e: + self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e)) + + def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.update({'hostid': host_id, 'groups': group_ids, 'status': status}) + interface_list_copy = exist_interface_list + if interfaces: + for interface in interfaces: + flag = False + interface_str = interface + for exist_interface in exist_interface_list: + interface_type = interface['type'] + exist_interface_type = int(exist_interface['type']) + if interface_type == exist_interface_type: + # update + interface_str['interfaceid'] = exist_interface['interfaceid'] + 
self._zapi.hostinterface.update(interface_str) + flag = True + interface_list_copy.remove(exist_interface) + break + if not flag: + # add + interface_str['hostid'] = host_id + self._zapi.hostinterface.create(interface_str) + # remove + remove_interface_ids = [] + for remove_interface in interface_list_copy: + interface_id = remove_interface['interfaceid'] + remove_interface_ids.append(interface_id) + if len(remove_interface_ids) > 0: + self._zapi.hostinterface.delete(remove_interface_ids) + except Exception, e: + self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e)) + + def delete_host(self, host_id, host_name): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.delete({'hostid': host_id}) + except Exception, e: + self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e)) + + # get host by host name + def get_host_by_host_name(self, host_name): + host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}}) + if len(host_list) < 1: + self._module.fail_json(msg="Host not found: %s" % host_name) + else: + return host_list[0] + + # get group ids by group names + def get_group_ids_by_group_names(self, group_names): + group_ids = [] + if self.check_host_group_exist(group_names): + group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}}) + for group in group_list: + group_id = group['groupid'] + group_ids.append({'groupid': group_id}) + return group_ids + + # get host templates by host id + def get_host_templates_by_host_id(self, host_id): + template_ids = [] + template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id}) + for template in template_list: + template_ids.append(template['templateid']) + return template_ids + + # get host groups by host id + def get_host_groups_by_host_id(self, host_id): + exist_host_groups = [] + host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id}) + + if len(host_groups_list) >= 1: + for host_groups_name in host_groups_list: + exist_host_groups.append(host_groups_name['name']) + return exist_host_groups + + # check the exist_interfaces whether it equals the interfaces or not + def check_interface_properties(self, exist_interface_list, interfaces): + interfaces_port_list = [] + if len(interfaces) >= 1: + for interface in interfaces: + interfaces_port_list.append(int(interface['port'])) + + exist_interface_ports = [] + if len(exist_interface_list) >= 1: + for exist_interface in exist_interface_list: + exist_interface_ports.append(int(exist_interface['port'])) + + if set(interfaces_port_list) != set(exist_interface_ports): + return True + + for exist_interface in exist_interface_list: + exit_interface_port = int(exist_interface['port']) + for interface in interfaces: + interface_port = int(interface['port']) + if interface_port == exit_interface_port: + for key in interface.keys(): + if str(exist_interface[key]) != str(interface[key]): + return True + + return False + + # get the status of host by host + def get_host_status_by_host(self, host): + return host['status'] + + # check all the properties before link or clear template + def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids, + exist_interfaces, host): + # get the existing host's groups + exist_host_groups = self.get_host_groups_by_host_id(host_id) + if set(host_groups) != set(exist_host_groups): + return True + + # get the existing status + exist_status = 
self.get_host_status_by_host(host) + if int(status) != int(exist_status): + return True + + # check the exist_interfaces whether it equals the interfaces or not + if self.check_interface_properties(exist_interfaces, interfaces): + return True + + # get the existing templates + exist_template_ids = self.get_host_templates_by_host_id(host_id) + if set(list(template_ids)) != set(exist_template_ids): + return True + + return False + + # link or clear template of the host + def link_or_clear_template(self, host_id, template_id_list): + # get host's exist template ids + exist_template_id_list = self.get_host_templates_by_host_id(host_id) + + exist_template_ids = set(exist_template_id_list) + template_ids = set(template_id_list) + template_id_list = list(template_ids) + + # get unlink and clear templates + templates_clear = exist_template_ids.difference(template_ids) + templates_clear_list = list(templates_clear) + request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list} + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.update(request_str) + except Exception, e: + self._module.fail_json(msg="Failed to link template to host: %s" % e) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(required=True, default=None, aliases=['url']), + login_user=dict(required=True), + login_password=dict(required=True), + host_name=dict(required=True), + host_groups=dict(required=False), + link_templates=dict(required=False), + status=dict(default="enabled"), + state=dict(default="present"), + timeout=dict(default=10), + interfaces=dict(required=False) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + host_name = module.params['host_name'] + host_groups = module.params['host_groups'] + link_templates = module.params['link_templates'] + status = module.params['status'] + state = module.params['state'] + timeout = module.params['timeout'] + interfaces = module.params['interfaces'] + + # convert enabled to 0; disabled to 1 + status = 1 if status == "disabled" else 0 + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx.login(login_user, login_password) + except Exception, e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + host = Host(module, zbx) + + template_ids = [] + if link_templates: + template_ids = host.get_template_ids(link_templates) + + group_ids = [] + + if host_groups: + group_ids = host.get_group_ids_by_group_names(host_groups) + + ip = "" + if interfaces: + for interface in interfaces: + if interface['type'] == 1: + ip = interface['ip'] + + # check if host exist + is_host_exist = host.is_host_exist(host_name) + + if is_host_exist: + # get host id by host name + zabbix_host_obj = host.get_host_by_host_name(host_name) + host_id = zabbix_host_obj['hostid'] + + if state == "absent": + # remove host + host.delete_host(host_id, host_name) + module.exit_json(changed=True, result="Successfully delete host %s" % host_name) + else: + if not group_ids: + module.fail_json(msg="Specify at least one group for updating host '%s'." 
% host_name) + + # get exist host's interfaces + exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id}) + exist_interfaces_copy = copy.deepcopy(exist_interfaces) + + # update host + interfaces_len = len(interfaces) if interfaces else 0 + + if len(exist_interfaces) > interfaces_len: + if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids, + exist_interfaces, zabbix_host_obj): + host.link_or_clear_template(host_id, template_ids) + host.update_host(host_name, group_ids, status, host_id, + interfaces, exist_interfaces) + module.exit_json(changed=True, + result="Successfully update host %s (%s) and linked with template '%s'" + % (host_name, ip, link_templates)) + else: + module.exit_json(changed=False) + else: + if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids, + exist_interfaces_copy, zabbix_host_obj): + host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces) + host.link_or_clear_template(host_id, template_ids) + module.exit_json(changed=True, + result="Successfully update host %s (%s) and linked with template '%s'" + % (host_name, ip, link_templates)) + else: + module.exit_json(changed=False) + else: + if not group_ids: + module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name) + + if not interfaces or (interfaces and len(interfaces) == 0): + module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name) + + # create host + host_id = host.add_host(host_name, group_ids, status, interfaces) + host.link_or_clear_template(host_id, template_ids) + module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % ( + host_name, ip, link_templates)) + +from ansible.module_utils.basic import * +main() + From d0256c593a7973f35c6ab5462afc93a408a1d453 Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sun, 14 Dec 2014 15:14:23 -0800 Subject: [PATCH 086/720] add zabbix_hostmacro --- monitoring/zabbix_hostmacro.py | 233 +++++++++++++++++++++++++++++++++ 1 file changed, 233 insertions(+) create mode 100644 monitoring/zabbix_hostmacro.py diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py new file mode 100644 index 00000000000..6cd95b714f8 --- /dev/null +++ b/monitoring/zabbix_hostmacro.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013-2014, Epic Games, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: zabbix_hostmacro +short_description: Zabbix host macro creates/updates/deletes +description: + - When the host macro does not exists, a new macro will be created, added to specific host. + - When the host macro already exists, the value will be updated. + - Delete a host macro from Zabbix if the macro exists. 
+version_added: "1.9" +author: Dean Hailin Song +requirements: + - zabbix-api python module +options: + server_url: + description: + - Url of Zabbix server, with protocol (http or https). + C(url) is an alias for C(server_url). + required: true + default: null + aliases: [ "url" ] + login_user: + description: + - Zabbix user name. + required: true + default: null + login_password: + description: + - Zabbix user password. + required: true + default: null + host_name: + description: + - Technical name of the host. + - If the host has already been added, the host name won't be updated. + required: true + macro_name: + description: + - Technical name of the host macro. + required: true + macro_value: + description: + - Value of the host macro. + required: true + state: + description: + - create/update or delete macro. + - Possible values are: present and absent. If the macro already exists, and the state is "present", just to update the macro. + required: false + default: "present" + timeout: + description: + - The timeout of API request(seconds). + default: 10 +''' + +EXAMPLES = ''' +- name: Create a new host macro or update an existing macro's value + local_action: + module: zabbix_hostmacro + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + macro_name:Example macro + macro_value:Example value + state: present +''' + +import logging +import copy +from ansible.module_utils.basic import * + +try: + from zabbix_api import ZabbixAPI, ZabbixAPISubClass + + HAS_ZABBIX_API = True +except ImportError: + HAS_ZABBIX_API = False + + +# Extend the ZabbixAPI +# Since the zabbix-api python module too old (version 1.0, no higher version so far). +class ZabbixAPIExtends(ZabbixAPI): + def __init__(self, server, timeout, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout) + + +class HostMacro(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # exist host + def is_host_exist(self, host_name): + result = self._zapi.host.exists({'host': host_name}) + return result + + # get host id by host name + def get_host_id(self, host_name): + try: + host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': host_name}}) + if len(host_list) < 1: + self._module.fail_json(msg="Host not found: %s" % host_name) + else: + host_id = host_list[0]['hostid'] + return host_id + except Exception, e: + self._module.fail_json(msg="Failed to get the host %s id: %s." 
% (host_name, e)) + + # get host macro + def get_host_macro(self, macro_name, host_id): + try: + host_macro_list = self._zapi.usermacro.get( + {"output": "extend", "selectSteps": "extend", 'hostids': [host_id], 'filter': {'macro': '{$' + macro_name + '}'}}) + if len(host_macro_list) > 0: + return host_macro_list[0] + return None + except Exception, e: + self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e)) + + # create host macro + def create_host_macro(self, macro_name, macro_value, host_id): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.usermacro.create({'hostid': host_id, 'macro': '{$' + macro_name + '}', 'value': macro_value}) + self._module.exit_json(changed=True, result="Successfully added host macro %s " % macro_name) + except Exception, e: + self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e)) + + # update host macro + def update_host_macro(self, host_macro_obj, macro_name, macro_value): + host_macro_id = host_macro_obj['hostmacroid'] + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value}) + self._module.exit_json(changed=True, result="Successfully updated host macro %s " % macro_name) + except Exception, e: + self._module.fail_json(msg="Failed to updated host macro %s: %s" % (macro_name, e)) + + # delete host macro + def delete_host_macro(self, host_macro_obj, macro_name): + host_macro_id = host_macro_obj['hostmacroid'] + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.usermacro.delete([host_macro_id]) + self._module.exit_json(changed=True, result="Successfully deleted host macro %s " % macro_name) + except Exception, e: + self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e)) + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(required=True, default=None, aliases=['url']), + login_user=dict(required=True), + login_password=dict(required=True), + host_name=dict(required=True), + macro_name=dict(required=True), + macro_value=dict(required=True), + state=dict(default="present"), + timeout=dict(default=10) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + host_name = module.params['host_name'] + macro_name = (module.params['macro_name']).upper() + macro_value = module.params['macro_value'] + state = module.params['state'] + timeout = module.params['timeout'] + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx.login(login_user, login_password) + except Exception, e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + host_macro_class_obj = HostMacro(module, zbx) + + changed = False + + if host_name: + host_id = host_macro_class_obj.get_host_id(host_name) + host_macro_obj = host_macro_class_obj.get_host_macro(macro_name, host_id) + + if state == 'absent': + if not host_macro_obj: + module.exit_json(changed=False, msg="Host Macro %s does not exist" % macro_name) + else: + # delete a macro + host_macro_class_obj.delete_host_macro(host_macro_obj, macro_name) + else: + if not host_macro_obj: + # create host macro + 
host_macro_class_obj.create_host_macro(macro_name, macro_value, host_id) + else: + # update host macro + host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value) + +from ansible.module_utils.basic import * +main() + From 092cb5b4cdf2515ab105ecd60ac135535454aacb Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sat, 28 Mar 2015 08:18:25 -0700 Subject: [PATCH 087/720] add zabbix_screen --- monitoring/zabbix_screen.py | 423 ++++++++++++++++++++++++++++++++++++ 1 file changed, 423 insertions(+) create mode 100644 monitoring/zabbix_screen.py diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py new file mode 100644 index 00000000000..06e336ec368 --- /dev/null +++ b/monitoring/zabbix_screen.py @@ -0,0 +1,423 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013-2014, Epic Games, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +DOCUMENTATION = ''' +--- +module: zabbix_screen +short_description: Zabbix screen creates/updates/deletes +description: + - When the screen does not exists, a new screen will be created with any screen items specified. + - When the screen already exists and the graphs have changed, the screen items will be updated. + - When the graph IDs have not changed, the screen items won't be updated unless the graph_width and graph_height have changed. + - Delete screen(s) from Zabbix if the screen(s) exists. +version_added: "1.9" +author: Tony Minfei Ding, Harrison Gu +requirements: + - zabbix-api python module +options: + server_url: + description: + - Url of Zabbix server, with protocol (http or https). + C(url) is an alias for C(server_url). + required: true + default: null + aliases: [ "url" ] + login_user: + description: + - Zabbix user name. + required: true + default: null + login_password: + description: + - Zabbix user password. + required: true + default: null + timeout: + description: + - The timeout of API request(seconds). + default: 10 + zabbix_screens: + description: + - List of screens to be created/updated/deleted(see example). + - If the screen(s) already been added, the screen(s) name won't be updated. + - When creating or updating screen(s), the screen_name, host_group are required. + - When deleting screen(s), the screen_name is required. + - The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated. + required: true + default: null +notes: + - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed. +''' + +EXAMPLES = ''' +# Create/update a screen. 
+- name: Create a new screen or update an existing screen's items + local_action: + module: zabbix_screen + server_url: http://monitor.example.com + login_user: username + login_password: password + screens: + - screen_name: ExampleScreen1 + host_group: Example group1 + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + +# Create/update multi-screen +- name: Create two of new screens or update the existing screens' items + local_action: + module: zabbix_screen + server_url: http://monitor.example.com + login_user: username + login_password: password + screens: + - screen_name: ExampleScreen1 + host_group: Example group1 + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + - screen_name: ExampleScreen2 + host_group: Example group2 + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + +# Limit the Zabbix screen creations to one host since Zabbix can return an error when doing concurent updates +- name: Create a new screen or update an existing screen's items + local_action: + module: zabbix_screen + server_url: http://monitor.example.com + login_user: username + login_password: password + state: present + screens: + - screen_name: ExampleScreen + host_group: Example group + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + when: inventory_hostname==groups['group_name'][0] +''' + +from ansible.module_utils.basic import * + +try: + from zabbix_api import ZabbixAPI, ZabbixAPISubClass + from zabbix_api import ZabbixAPIException + from zabbix_api import Already_Exists + HAS_ZABBIX_API = True +except ImportError: + HAS_ZABBIX_API = False + + +# Extend the ZabbixAPI +# Since the zabbix-api python module too old (version 1.0, and there's no higher version so far), it doesn't support the 'screenitem' api call, +# we have to inherit the ZabbixAPI class to add 'screenitem' support. 
+class ZabbixAPIExtends(ZabbixAPI): + screenitem = None + + def __init__(self, server, timeout, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout) + self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs)) + + +class Screen(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # get group id by group name + def get_host_group_id(self, group_name): + if group_name == "": + self._module.fail_json(msg="group_name is required") + hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}}) + if len(hostGroup_list) < 1: + self._module.fail_json(msg="Host group not found: %s" % group_name) + else: + hostGroup_id = hostGroup_list[0]['groupid'] + return hostGroup_id + + # get monitored host_id by host_group_id + def get_host_ids_by_group_id(self, group_id): + host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1}) + if len(host_list) < 1: + self._module.fail_json(msg="No host in the group.") + else: + host_ids = [] + for i in host_list: + host_id = i['hostid'] + host_ids.append(host_id) + return host_ids + + # get screen + def get_screen_id(self, screen_name): + if screen_name == "": + self._module.fail_json(msg="screen_name is required") + try: + screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}}) + if len(screen_id_list) >= 1: + screen_id = screen_id_list[0]['screenid'] + return screen_id + return None + except Exception as e: + self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e)) + + # create screen + def create_screen(self, screen_name, h_size, v_size): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size}) + return screen['screenids'][0] + except Exception as e: + self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e)) + + # update screen + def update_screen(self, screen_id, screen_name, h_size, v_size): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size}) + except Exception as e: + self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e)) + + # delete screen + def delete_screen(self, screen_id, screen_name): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.screen.delete([screen_id]) + except Exception as e: + self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e)) + + # get graph ids + def get_graph_ids(self, hosts, graph_name_list): + graph_id_lists = [] + vsize = 1 + for host in hosts: + graph_id_list = self.get_graphs_by_host_id(graph_name_list, host) + size = len(graph_id_list) + if size > 0: + graph_id_lists.extend(graph_id_list) + if vsize < size: + vsize = size + return graph_id_lists, vsize + + # getGraphs + def get_graphs_by_host_id(self, graph_name_list, host_id): + graph_ids = [] + for graph_name in graph_name_list: + graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id}) + graph_id_list = [] + if len(graphs_list) > 0: + for graph in graphs_list: + graph_id = graph['graphid'] + graph_id_list.append(graph_id) + if len(graph_id_list) > 0: + graph_ids.extend(graph_id_list) + return graph_ids + + # get screen items + def get_screen_items(self, screen_id): + screen_item_list = 
self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id}) + return screen_item_list + + # delete screen items + def delete_screen_items(self, screen_id, screen_item_id_list): + try: + if len(screen_item_id_list) == 0: + return True + screen_item_list = self.get_screen_items(screen_id) + if len(screen_item_list) > 0: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.screenitem.delete(screen_item_id_list) + return True + return False + except ZabbixAPIException: + pass + + # get screen's hsize and vsize + def get_hsize_vsize(self, hosts, v_size): + h_size = len(hosts) + if h_size == 1: + if v_size == 1: + h_size = 1 + elif v_size in range(2, 9): + h_size = 2 + else: + h_size = 3 + v_size = (v_size - 1) / h_size + 1 + return h_size, v_size + + # create screen_items + def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size): + if len(hosts) < 4: + if width is None or width < 0: + width = 500 + else: + if width is None or width < 0: + width = 200 + if height is None or height < 0: + height = 100 + + try: + # when there're only one host, only one row is not good. + if len(hosts) == 1: + graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0]) + for i, graph_id in enumerate(graph_id_list): + if graph_id is not None: + self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id, + 'width': width, 'height': height, + 'x': i % h_size, 'y': i / h_size, 'colspan': 1, 'rowspan': 1, + 'elements': 0, 'valign': 0, 'halign': 0, + 'style': 0, 'dynamic': 0, 'sort_triggers': 0}) + else: + for i, host in enumerate(hosts): + graph_id_list = self.get_graphs_by_host_id(graph_name_list, host) + for j, graph_id in enumerate(graph_id_list): + if graph_id is not None: + self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id, + 'width': width, 'height': height, + 'x': i, 'y': j, 'colspan': 1, 'rowspan': 1, + 'elements': 0, 'valign': 0, 'halign': 0, + 'style': 0, 'dynamic': 0, 'sort_triggers': 0}) + except Already_Exists: + pass + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(required=True, default=None, aliases=['url']), + login_user=dict(required=True), + login_password=dict(required=True), + timeout=dict(default=10), + screens=dict(required=True) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + timeout = module.params['timeout'] + screens = module.params['screens'] + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx.login(login_user, login_password) + except Exception, e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + screen = Screen(module, zbx) + created_screens = [] + changed_screens = [] + deleted_screens = [] + + for zabbix_screen in screens: + screen_name = zabbix_screen['screen_name'] + screen_id = screen.get_screen_id(screen_name) + state = "absent" if "state" in zabbix_screen and zabbix_screen['state'] == "absent" else "present" + + if state == "absent": + if screen_id: + screen_item_list = screen.get_screen_items(screen_id) + screen_item_id_list = [] + for screen_item in screen_item_list: + screen_item_id = screen_item['screenitemid'] + 
screen_item_id_list.append(screen_item_id) + screen.delete_screen_items(screen_id, screen_item_id_list) + screen.delete_screen(screen_id, screen_name) + + deleted_screens.append(screen_name) + else: + host_group = zabbix_screen['host_group'] + graph_names = zabbix_screen['graph_names'] + graph_width = None + if 'graph_width' in zabbix_screen: + graph_width = zabbix_screen['graph_width'] + graph_height = None + if 'graph_height' in zabbix_screen: + graph_height = zabbix_screen['graph_height'] + host_group_id = screen.get_host_group_id(host_group) + hosts = screen.get_host_ids_by_group_id(host_group_id) + + screen_item_id_list = [] + resource_id_list = [] + + graph_ids, v_size = screen.get_graph_ids(hosts, graph_names) + h_size, v_size = screen.get_hsize_vsize(hosts, v_size) + + if not screen_id: + # create screen + screen_id = screen.create_screen(screen_name, h_size, v_size) + screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size) + created_screens.append(screen_name) + else: + screen_item_list = screen.get_screen_items(screen_id) + + for screen_item in screen_item_list: + screen_item_id = screen_item['screenitemid'] + resource_id = screen_item['resourceid'] + screen_item_id_list.append(screen_item_id) + resource_id_list.append(resource_id) + + # when the screen items changed, then update + if graph_ids != resource_id_list: + deleted = screen.delete_screen_items(screen_id, screen_item_id_list) + if deleted: + screen.update_screen(screen_id, screen_name, h_size, v_size) + screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size) + changed_screens.append(screen_name) + + if created_screens and changed_screens: + module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens), ",".join(changed_screens))) + elif created_screens: + module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens)) + elif changed_screens: + module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens)) + elif deleted_screens: + module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens)) + else: + module.exit_json(changed=False) + +# <> +main() From c08ce5b30dc16be4d97a9cba36f01186310980b4 Mon Sep 17 00:00:00 2001 From: Tyler Kellen Date: Sat, 28 Mar 2015 13:20:30 -0600 Subject: [PATCH 088/720] correct version_added for known_hosts It was added in 1.9, not 1.6. --- system/known_hosts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/known_hosts.py b/system/known_hosts.py index d4a6e9c35e0..893eca3dcb7 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -26,7 +26,7 @@ description: - The M(known_hosts) module lets you add or remove a host from the C(known_hosts) file. This is useful if you're going to want to use the M(git) module over ssh, for example. If you have a very large number of host keys to manage, you will find the M(template) module more useful. 
-version_added: "1.6" +version_added: "1.9" options: name: aliases: [ 'host' ] From 7c41002d933020cd781a12b7428e13ac20432864 Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sun, 29 Mar 2015 13:37:15 -0700 Subject: [PATCH 089/720] remove superfluous defaults --- monitoring/zabbix_host.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 0d3cc8e661f..a425afda1cd 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -37,18 +37,15 @@ options: - Url of Zabbix server, with protocol (http or https). C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null host_name: description: - Technical name of the host. @@ -340,7 +337,7 @@ class Host(object): def main(): module = AnsibleModule( argument_spec=dict( - server_url=dict(required=True, default=None, aliases=['url']), + server_url=dict(required=True, aliases=['url']), login_user=dict(required=True), login_password=dict(required=True), host_name=dict(required=True), From 636e96fafc567cf615816f7207242be14e96e03b Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sun, 29 Mar 2015 13:41:07 -0700 Subject: [PATCH 090/720] remove superfluous defaults --- monitoring/zabbix_hostmacro.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py index 6cd95b714f8..f0f1a7efdd1 100644 --- a/monitoring/zabbix_hostmacro.py +++ b/monitoring/zabbix_hostmacro.py @@ -37,18 +37,15 @@ options: - Url of Zabbix server, with protocol (http or https). C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null host_name: description: - Technical name of the host. @@ -174,7 +171,7 @@ class HostMacro(object): def main(): module = AnsibleModule( argument_spec=dict( - server_url=dict(required=True, default=None, aliases=['url']), + server_url=dict(required=True, aliases=['url']), login_user=dict(required=True), login_password=dict(required=True), host_name=dict(required=True), From 32878110d07e43511b4d631a21cb433840def816 Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sun, 29 Mar 2015 13:41:52 -0700 Subject: [PATCH 091/720] remove superfluous defaults --- monitoring/zabbix_screen.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py index 06e336ec368..ff69d24ea4e 100644 --- a/monitoring/zabbix_screen.py +++ b/monitoring/zabbix_screen.py @@ -39,18 +39,15 @@ options: - Url of Zabbix server, with protocol (http or https). C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null timeout: description: - The timeout of API request(seconds). @@ -63,7 +60,6 @@ options: - When deleting screen(s), the screen_name is required. - The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated. 
required: true - default: null notes: - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed. ''' @@ -321,7 +317,7 @@ class Screen(object): def main(): module = AnsibleModule( argument_spec=dict( - server_url=dict(required=True, default=None, aliases=['url']), + server_url=dict(required=True, aliases=['url']), login_user=dict(required=True), login_password=dict(required=True), timeout=dict(default=10), From 510b77ca0e7b72940a2f0acd45c0df2a7a524e26 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 29 Mar 2015 16:51:30 -0400 Subject: [PATCH 092/720] minor documentation fixes --- monitoring/zabbix_host.py | 29 +++++++++++------------------ monitoring/zabbix_hostmacro.py | 18 +++++------------- monitoring/zabbix_screen.py | 14 +++----------- notification/pushover | 4 ++-- 4 files changed, 21 insertions(+), 44 deletions(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 0d3cc8e661f..63b3178e1ac 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -24,10 +24,8 @@ DOCUMENTATION = ''' module: zabbix_host short_description: Zabbix host creates/updates/deletes description: - - When the host does not exists, a new host will be created, added to any host groups and linked to any templates. - - When the host already exists, the host group membership will be updated, along with the template links and interfaces. - - Delete a host from Zabbix if the host exists. -version_added: "1.9" + - This module allows you to create, modify and delete Zabbix host entries and associated group and template data. +version_added: "2.0" author: Tony Minfei Ding, Harrison Gu requirements: - zabbix-api python module @@ -35,44 +33,38 @@ options: server_url: description: - Url of Zabbix server, with protocol (http or https). - C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - - Zabbix user name. + - Zabbix user name, used to authenticate against the server. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null host_name: description: - - Technical name of the host. - - If the host has already been added, the host name won't be updated. + - Name of the host in Zabbix. + - host_name is the unique identifier used and cannot be updated using this module. required: true host_groups: description: - - List of host groups to add the host to. + - List of host groups the host is part of. required: false link_templates: description: - - List of templates to be linked to the host. + - List of templates linked to the host. required: false default: None status: description: - - Status and function of the host. - - Possible values are: enabled and disabled + - 'Monitoring status of the host. Possible values are: "enabled" and "disabled".' required: false default: "enabled" state: description: - - create/update or delete host. - - Possible values are: present and absent. If the host already exists, and the state is "present", just to update the host. + - 'Possible values are: "present" and "absent". If the host already exists, and the state is "present", it will just to update the host is the associated data is different. "absent" will remove a host if it exists.' required: false default: "present" timeout: @@ -81,11 +73,12 @@ options: default: 10 interfaces: description: - - List of interfaces to be created for the host (see example). 
+ - List of interfaces to be created for the host (see example below). - Available values are: dns, ip, main, port, type and useip. - Please review the interface documentation for more information on the supported properties: - https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface required: false + default: [] ''' EXAMPLES = ''' diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py index 6cd95b714f8..871a974f413 100644 --- a/monitoring/zabbix_hostmacro.py +++ b/monitoring/zabbix_hostmacro.py @@ -24,10 +24,8 @@ DOCUMENTATION = ''' module: zabbix_hostmacro short_description: Zabbix host macro creates/updates/deletes description: - - When the host macro does not exists, a new macro will be created, added to specific host. - - When the host macro already exists, the value will be updated. - - Delete a host macro from Zabbix if the macro exists. -version_added: "1.9" + - manages Zabbix host macros, it can create, update or delete them. +version_added: "2.0" author: Dean Hailin Song requirements: - zabbix-api python module @@ -35,28 +33,23 @@ options: server_url: description: - Url of Zabbix server, with protocol (http or https). - C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null host_name: description: - - Technical name of the host. - - If the host has already been added, the host name won't be updated. + - Name of the host. required: true macro_name: description: - - Technical name of the host macro. + - Name of the host macro. required: true macro_value: description: @@ -64,8 +57,7 @@ options: required: true state: description: - - create/update or delete macro. - - Possible values are: present and absent. If the macro already exists, and the state is "present", just to update the macro. + - 'Possible values are: "present" and "absent". If the macro already exists, and the state is "present", it will just to update the macro if needed.' required: false default: "present" timeout: diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py index 06e336ec368..b0a886a2c0c 100644 --- a/monitoring/zabbix_screen.py +++ b/monitoring/zabbix_screen.py @@ -25,11 +25,8 @@ DOCUMENTATION = ''' module: zabbix_screen short_description: Zabbix screen creates/updates/deletes description: - - When the screen does not exists, a new screen will be created with any screen items specified. - - When the screen already exists and the graphs have changed, the screen items will be updated. - - When the graph IDs have not changed, the screen items won't be updated unless the graph_width and graph_height have changed. - - Delete screen(s) from Zabbix if the screen(s) exists. -version_added: "1.9" + - This module allows you to create, modify and delete Zabbix screens and associated graph data. +version_added: "2.0" author: Tony Minfei Ding, Harrison Gu requirements: - zabbix-api python module @@ -37,20 +34,16 @@ options: server_url: description: - Url of Zabbix server, with protocol (http or https). - C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null timeout: description: - The timeout of API request(seconds). 
@@ -61,9 +54,8 @@ options: - If the screen(s) already been added, the screen(s) name won't be updated. - When creating or updating screen(s), the screen_name, host_group are required. - When deleting screen(s), the screen_name is required. - - The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated. + - The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated as needed. required: true - default: null notes: - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed. ''' diff --git a/notification/pushover b/notification/pushover index 8e9d2596d43..3e710ca02dd 100644 --- a/notification/pushover +++ b/notification/pushover @@ -27,10 +27,10 @@ version_added: "2.0" short_description: Send notifications via u(https://pushover.net) description: - Send notifications via pushover, to subscriber list of devices, and email - addresses. Requires pushover app on devices. + addresses. Requires pushover app on devices. notes: - You will require a pushover.net account to use this module. But no account - is required to receive messages. + is required to receive messages. options: msg: description: From 231ed6208da21c2b7436ba789e62a7ccd654f7a9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 29 Mar 2015 17:05:53 -0400 Subject: [PATCH 093/720] removed requirements which was causing crash with dupe from shared doc fragments --- cloud/amazon/cloudtrail.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index 777f1df846c..b58bcd6e1d0 100755 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -22,7 +22,6 @@ description: - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. This module has a dependency on python-boto >= 2.21. version_added: "2.0" author: Ted Timmons -requirements: ["boto"] options: state: description: From 4c0b1b42bc79634052b69557fc804f13e9557669 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 30 Mar 2015 20:15:40 -0400 Subject: [PATCH 094/720] minor fixes to gluster: - removed functions from main scope - renamed rebalance function to disambiguate from variable - updated docs with defaults - added exception handling to command execution --- system/gluster_volume.py | 317 ++++++++++++++++++++------------------- 1 file changed, 165 insertions(+), 152 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index d51512a1436..e78b1a1bfaa 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -38,27 +38,33 @@ options: use started/stopped to control it's availability. 
cluster: required: false + default: null description: - List of hosts to use for probing and brick setup host: required: false + default: null description: - Override local hostname (for peer probing purposes) replicas: required: false + default: null description: - Replica count for volume stripes: required: false + default: null description: - Stripe count for volume transport: required: false choices: [ 'tcp', 'rdma', 'tcp,rdma' ] + default: 'tcp' description: - Transport type for volume brick: required: false + default: null description: - Brick path on servers start_on_create: @@ -69,22 +75,27 @@ options: rebalance: choices: [ 'yes', 'no'] required: false + default: 'no' description: - Controls whether the cluster is rebalanced after changes directory: required: false + default: null description: - Directory for limit-usage options: required: false + default: null description: - A dictionary/hash with options/settings for the volume quota: required: false + default: null description: - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list) force: required: false + default: null description: - If brick is being created in the root partition, module will fail. Set force to true to override this behaviour @@ -119,165 +130,167 @@ import shutil import time import socket -def main(): - - def run_gluster(gargs, **kwargs): - args = [glusterbin] - args.extend(gargs) +def run_gluster(gargs, **kwargs): + args = [glusterbin] + args.extend(gargs) + try: rc, out, err = module.run_command(args, **kwargs) if rc != 0: module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) - return out - - def run_gluster_nofail(gargs, **kwargs): - args = [glusterbin] - args.extend(gargs) - rc, out, err = module.run_command(args, **kwargs) - if rc != 0: - return None - return out - - def run_gluster_yes(gargs): - args = [glusterbin] - args.extend(gargs) - rc, out, err = module.run_command(args, data='y\n') - if rc != 0: - module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) - return out - - def get_peers(): - out = run_gluster([ 'peer', 'status']) - i = 0 - peers = {} - hostname = None - uuid = None - state = None - for row in out.split('\n'): - if ': ' in row: - key, value = row.split(': ') - if key.lower() == 'hostname': - hostname = value - if key.lower() == 'uuid': - uuid = value - if key.lower() == 'state': - state = value - peers[hostname] = [ uuid, state ] - return peers - - def get_volumes(): - out = run_gluster([ 'volume', 'info' ]) - - volumes = {} - volume = {} - for row in out.split('\n'): - if ': ' in row: - key, value = row.split(': ') - if key.lower() == 'volume name': - volume['name'] = value + except Exception, e: + module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e)) + return out + +def run_gluster_nofail(gargs, **kwargs): + args = [glusterbin] + args.extend(gargs) + rc, out, err = module.run_command(args, **kwargs) + if rc != 0: + return None + return out + +def run_gluster_yes(gargs): + args = [glusterbin] + args.extend(gargs) + rc, out, err = module.run_command(args, data='y\n') + if rc != 0: + module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) + return out + +def get_peers(): + out = run_gluster([ 'peer', 'status']) + i = 0 + peers = {} + hostname = None + uuid = None + state = None + for row in out.split('\n'): + if ': ' in row: + 
key, value = row.split(': ') + if key.lower() == 'hostname': + hostname = value + if key.lower() == 'uuid': + uuid = value + if key.lower() == 'state': + state = value + peers[hostname] = [ uuid, state ] + return peers + +def get_volumes(): + out = run_gluster([ 'volume', 'info' ]) + + volumes = {} + volume = {} + for row in out.split('\n'): + if ': ' in row: + key, value = row.split(': ') + if key.lower() == 'volume name': + volume['name'] = value + volume['options'] = {} + volume['quota'] = False + if key.lower() == 'volume id': + volume['id'] = value + if key.lower() == 'status': + volume['status'] = value + if key.lower() == 'transport-type': + volume['transport'] = value + if key.lower() != 'bricks' and key.lower()[:5] == 'brick': + if not 'bricks' in volume: + volume['bricks'] = [] + volume['bricks'].append(value) + # Volume options + if '.' in key: + if not 'options' in volume: volume['options'] = {} - volume['quota'] = False - if key.lower() == 'volume id': - volume['id'] = value - if key.lower() == 'status': - volume['status'] = value - if key.lower() == 'transport-type': - volume['transport'] = value - if key.lower() != 'bricks' and key.lower()[:5] == 'brick': - if not 'bricks' in volume: - volume['bricks'] = [] - volume['bricks'].append(value) - # Volume options - if '.' in key: - if not 'options' in volume: - volume['options'] = {} - volume['options'][key] = value - if key == 'features.quota' and value == 'on': - volume['quota'] = True - else: - if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:': - if len(volume) > 0: - volumes[volume['name']] = volume - volume = {} - return volumes - - def get_quotas(name, nofail): - quotas = {} - if nofail: - out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ]) - if not out: - return quotas + volume['options'][key] = value + if key == 'features.quota' and value == 'on': + volume['quota'] = True else: - out = run_gluster([ 'volume', 'quota', name, 'list' ]) - for row in out.split('\n'): - if row[:1] == '/': - q = re.split('\s+', row) - quotas[q[0]] = q[1] - return quotas - - def wait_for_peer(host): - for x in range(0, 4): - peers = get_peers() - if host in peers and peers[host][1].lower().find('peer in cluster') != -1: - return True - time.sleep(1) - return False - - def probe(host): - run_gluster([ 'peer', 'probe', host ]) - if not wait_for_peer(host): - module.fail_json(msg='failed to probe peer %s' % host) - changed = True - - def probe_all_peers(hosts, peers, myhostname): - for host in hosts: - if host not in peers: - # dont probe ourselves - if myhostname != host: - probe(host) - - def create_volume(name, stripe, replica, transport, hosts, brick, force): - args = [ 'volume', 'create' ] - args.append(name) - if stripe: - args.append('stripe') - args.append(str(stripe)) - if replica: - args.append('replica') - args.append(str(replica)) - args.append('transport') - args.append(transport) - for host in hosts: - args.append(('%s:%s' % (host, brick))) - if force: - args.append('force') - run_gluster(args) - - def start_volume(name): - run_gluster([ 'volume', 'start', name ]) - - def stop_volume(name): - run_gluster_yes([ 'volume', 'stop', name ]) - - def set_volume_option(name, option, parameter): - run_gluster([ 'volume', 'set', name, option, parameter ]) - - def add_brick(name, brick, force): - args = [ 'volume', 'add-brick', name, brick ] - if force: - args.append('force') - run_gluster(args) - - def rebalance(name): - run_gluster(['volume', 'rebalance', name, 'start']) - - def enable_quota(name): - 
run_gluster([ 'volume', 'quota', name, 'enable' ]) - - def set_quota(name, directory, value): - run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ]) + if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:': + if len(volume) > 0: + volumes[volume['name']] = volume + volume = {} + return volumes +def get_quotas(name, nofail): + quotas = {} + if nofail: + out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ]) + if not out: + return quotas + else: + out = run_gluster([ 'volume', 'quota', name, 'list' ]) + for row in out.split('\n'): + if row[:1] == '/': + q = re.split('\s+', row) + quotas[q[0]] = q[1] + return quotas + +def wait_for_peer(host): + for x in range(0, 4): + peers = get_peers() + if host in peers and peers[host][1].lower().find('peer in cluster') != -1: + return True + time.sleep(1) + return False + +def probe(host): + run_gluster([ 'peer', 'probe', host ]) + if not wait_for_peer(host): + module.fail_json(msg='failed to probe peer %s' % host) + changed = True + +def probe_all_peers(hosts, peers, myhostname): + for host in hosts: + if host not in peers: + # dont probe ourselves + if myhostname != host: + probe(host) + +def create_volume(name, stripe, replica, transport, hosts, brick, force): + args = [ 'volume', 'create' ] + args.append(name) + if stripe: + args.append('stripe') + args.append(str(stripe)) + if replica: + args.append('replica') + args.append(str(replica)) + args.append('transport') + args.append(transport) + for host in hosts: + args.append(('%s:%s' % (host, brick))) + if force: + args.append('force') + run_gluster(args) + +def start_volume(name): + run_gluster([ 'volume', 'start', name ]) + +def stop_volume(name): + run_gluster_yes([ 'volume', 'stop', name ]) + +def set_volume_option(name, option, parameter): + run_gluster([ 'volume', 'set', name, option, parameter ]) + +def add_brick(name, brick, force): + args = [ 'volume', 'add-brick', name, brick ] + if force: + args.append('force') + run_gluster(args) + +def do_rebalance(name): + run_gluster(['volume', 'rebalance', name, 'start']) + +def enable_quota(name): + run_gluster([ 'volume', 'quota', name, 'enable' ]) + +def set_quota(name, directory, value): + run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ]) + +def main(): ### MAIN ### module = AnsibleModule( @@ -403,7 +416,7 @@ def main(): if changed: volumes = get_volumes() if rebalance: - rebalance(volume_name) + do_rebalance(volume_name) facts = {} facts['glusterfs'] = { 'peers': peers, 'volumes': volumes, 'quotas': quotas } From ee7fbcf418a77cb8d840d8ed9c567b179212be4a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 30 Mar 2015 22:11:17 -0400 Subject: [PATCH 095/720] minor fix to method of finding home as previous could 'overmatch' --- cloud/lxc/lxc_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 1ae67bf23c6..8c708dc31cd 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -398,7 +398,7 @@ LXC_ANSIBLE_STATES = { # home directory of the user that was attached to the container and source # that users environment variables by default. 
ATTACH_TEMPLATE = """#!/usr/bin/env bash -pushd "$(grep $(whoami) /etc/passwd | awk -F':' '{print $6}')" +pushd "$(getent passwd $(whoami)|cut -f6 -d':')" if [[ -f ".bashrc" ]];then source .bashrc fi From 7794042cf65b075c9ca9bf4248df994bff94401f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 30 Mar 2015 22:30:58 -0400 Subject: [PATCH 096/720] fixed missing parens --- system/gluster_volume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index e78b1a1bfaa..1669dddb81c 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -139,7 +139,7 @@ def run_gluster(gargs, **kwargs): if rc != 0: module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) except Exception, e: - module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e)) + module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e))) return out def run_gluster_nofail(gargs, **kwargs): From 58aab881c29587a5795b6721c9b199d148fceb84 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 31 Mar 2015 12:12:19 +0200 Subject: [PATCH 097/720] cloudstack: add new module cloudstack_fw --- cloud/cloudstack/__init__.py | 0 cloud/cloudstack/cloudstack_fw.py | 267 ++++++++++++++++++++++++++++++ 2 files changed, 267 insertions(+) create mode 100644 cloud/cloudstack/__init__.py create mode 100644 cloud/cloudstack/cloudstack_fw.py diff --git a/cloud/cloudstack/__init__.py b/cloud/cloudstack/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/cloudstack/cloudstack_fw.py b/cloud/cloudstack/cloudstack_fw.py new file mode 100644 index 00000000000..0014f433c47 --- /dev/null +++ b/cloud/cloudstack/cloudstack_fw.py @@ -0,0 +1,267 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +module: cloudstack_fw +short_description: Manages firewall rules on Apache CloudStack based clouds. +description: Creates and removes firewall rules. +version_added: '2.0' +author: René Moser +options: + ip_address: + description: + - Public IP address the rule is assigned to. + required: true + state: + description: + - State of the firewall rule. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + protocol: + description: + - Protocol of the firewall rule. + required: false + default: 'tcp' + choices: [ 'tcp', 'udp', 'icmp' ] + cidr: + description: + - CIDR (full notation) to be used for firewall rule. + required: false + default: '0.0.0.0\0' + start_port + description: + - Start port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). + required: false + default: null + end_port + description: + - End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). 
+ required: false + default: null + icmp_type + description: + - Type of the icmp message being sent. Considered if C(protocol=icmp). + required: false + default: null + icmp_code + description: + - Error code for this icmp message. Considered if C(protocol=icmp). + required: false + default: null + project: + description: + - Name of the project. + required: false + default: null +''' + +EXAMPLES = ''' +--- +# Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1 +- local_action: + module: cloudstack_fw + ip_address: 4.3.2.1 + start_port: 80 + end_port: 80 + cidr: 1.2.3.4/32 + + +# Allow inbound tcp/udp port 53 to 4.3.2.1 +- local_action: + module: cloudstack_fw + ip_address: 4.3.2.1 + start_port: 53 + end_port: 53 + protocol: '{{ item }}' + with_items: + - tcp + - udp + + +# Ensure firewall rule is removed +- local_action: + module: cloudstack_fw + ip_address: 4.3.2.1 + start_port: 8000 + end_port: 8888 + cidr: 17.0.0.0/8 + state: absent +''' + +RETURN = ''' +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackFirewall(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.firewall_rule = None + + + def get_firewall_rule(self): + if not self.firewall_rule: + cidr = self.module.params.get('cidr') + protocol = self.module.params.get('protocol') + start_port = self.module.params.get('start_port') + end_port = self.module.params.get('end_port') + icmp_code = self.module.params.get('icmp_code') + icmp_type = self.module.params.get('icmp_type') + + if protocol in ['tcp', 'udp'] and not (start_port and end_port): + self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol) + + if protocol == 'icmp' and not icmp_type: + self.module.fail_json(msg="no icmp_type set") + + args = {} + args['ipaddressid'] = self.get_ip_address_id() + args['projectid'] = self.get_project_id() + + firewall_rules = self.cs.listFirewallRules(**args) + if firewall_rules and 'firewallrule' in firewall_rules: + for rule in firewall_rules['firewallrule']: + type_match = self._type_cidr_match(rule, cidr) + + protocol_match = self._tcp_udp_match(rule, protocol, start_port, end_port) \ + or self._icmp_match(rule, protocol, icmp_code, icmp_type) + + if type_match and protocol_match: + self.firewall_rule = rule + break + return self.firewall_rule + + + def _tcp_udp_match(self, rule, protocol, start_port, end_port): + return protocol in ['tcp', 'udp'] \ + and protocol == rule['protocol'] \ + and start_port == int(rule['startport']) \ + and end_port == int(rule['endport']) + + + def _icmp_match(self, rule, protocol, icmp_code, icmp_type): + return protocol == 'icmp' \ + and protocol == rule['protocol'] \ + and icmp_code == rule['icmpcode'] \ + and icmp_type == rule['icmptype'] + + + def _type_cidr_match(self, rule, cidr): + return cidr == rule['cidrlist'] + + + def create_firewall_rule(self): + firewall_rule = self.get_firewall_rule() + if not firewall_rule: + self.result['changed'] = True + args = {} + args['cidrlist'] = self.module.params.get('cidr') + args['protocol'] = self.module.params.get('protocol') + args['startport'] = self.module.params.get('start_port') + args['endport'] = self.module.params.get('end_port') + args['icmptype'] = self.module.params.get('icmp_type') + args['icmpcode'] = self.module.params.get('icmp_code') + 
args['ipaddressid'] = self.get_ip_address_id() + + if not self.module.check_mode: + firewall_rule = self.cs.createFirewallRule(**args) + + return firewall_rule + + + def remove_firewall_rule(self): + firewall_rule = self.get_firewall_rule() + if firewall_rule: + self.result['changed'] = True + args = {} + args['id'] = firewall_rule['id'] + + if not self.module.check_mode: + res = self.cs.deleteFirewallRule(**args) + + return firewall_rule + + + def get_result(self, firewall_rule): + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + ip_address = dict(required=True, default=None), + cidr = dict(default='0.0.0.0/0'), + protocol = dict(choices=['tcp', 'udp', 'icmp'], default='tcp'), + icmp_type = dict(type='int', default=None), + icmp_code = dict(type='int', default=None), + start_port = dict(type='int', default=None), + end_port = dict(type='int', default=None), + state = dict(choices=['present', 'absent'], default='present'), + project = dict(default=None), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + required_together = ( + ['start_port', 'end_port'], + ), + mutually_exclusive = ( + ['icmp_type', 'start_port'], + ['icmp_type', 'end_port'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_fw = AnsibleCloudStackFirewall(module) + + state = module.params.get('state') + if state in ['absent']: + fw_rule = acs_fw.remove_firewall_rule() + else: + fw_rule = acs_fw.create_firewall_rule() + + result = acs_fw.get_result(fw_rule) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 9f85ae16ae81fc6fb8de71ab652771b1e2808143 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 31 Mar 2015 13:33:57 +0200 Subject: [PATCH 098/720] cloudstack: add new module cloudstack_iso --- cloud/cloudstack/__init__.py | 0 cloud/cloudstack/cloudstack_iso.py | 322 +++++++++++++++++++++++++++++ 2 files changed, 322 insertions(+) create mode 100644 cloud/cloudstack/__init__.py create mode 100644 cloud/cloudstack/cloudstack_iso.py diff --git a/cloud/cloudstack/__init__.py b/cloud/cloudstack/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/cloudstack/cloudstack_iso.py b/cloud/cloudstack/cloudstack_iso.py new file mode 100644 index 00000000000..bd90c427ea4 --- /dev/null +++ b/cloud/cloudstack/cloudstack_iso.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cloudstack_iso +short_description: Manages ISOs images on Apache CloudStack based clouds. +description: Register and remove ISO images. 
+version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of the ISO. + required: true + url: + description: + - URL where the ISO can be downloaded from. Required if C(state) is present. + required: false + default: null + os_type: + description: + - Name of the OS that best represents the OS of this ISO. If the iso is bootable this parameter needs to be passed. Required if C(state) is present. + required: false + default: null + is_ready: + description: + - This flag is used for searching existing ISOs. If set to C(true), it will only list ISO ready for deployment e.g. successfully downloaded and installed. Recommended to set it to C(false). + required: false + default: false + aliases: [] + is_public: + description: + - Register the ISO to be publicly available to all users. Only used if C(state) is present. + required: false + default: false + is_featured: + description: + - Register the ISO to be featured. Only used if C(state) is present. + required: false + default: false + is_dynamically_scalable: + description: + - Register the ISO having XS/VMWare tools installed inorder to support dynamic scaling of VM cpu/memory. Only used if C(state) is present. + required: false + default: false + aliases: [] + checksum: + description: + - The MD5 checksum value of this ISO. If set, we search by checksum instead of name. + required: false + default: false + bootable: + description: + - Register the ISO to be bootable. Only used if C(state) is present. + required: false + default: true + project: + description: + - Name of the project the ISO to be registered in. + required: false + default: null + zone: + description: + - Name of the zone you wish the ISO to be registered or deleted from. If not specified, first zone found will be used. + required: false + default: null + iso_filter: + description: + - Name of the filter used to search for the ISO. + required: false + default: 'self' + choices: [ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ] + state: + description: + - State of the ISO. + required: false + default: 'present' + choices: [ 'present', 'absent' ] +''' + +EXAMPLES = ''' +--- +# Register an ISO if ISO name does not already exist. +- local_action: + module: cloudstack_iso + name: Debian 7 64-bit + url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso + os_type: Debian GNU/Linux 7(64-bit) + + +# Register an ISO with given name if ISO md5 checksum does not already exist. +- local_action: + module: cloudstack_iso + name: Debian 7 64-bit + url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso + os_type: + checksum: 0b31bccccb048d20b551f70830bb7ad0 + + +# Remove an ISO by name +- local_action: + module: cloudstack_iso + name: Debian 7 64-bit + state: absent + + +# Remove an ISO by checksum +- local_action: + module: cloudstack_iso + name: Debian 7 64-bit + checksum: 0b31bccccb048d20b551f70830bb7ad0 + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the ISO. + returned: success + type: string + sample: Debian 7 64-bit +displaytext: + description: Text to be displayed of the ISO. + returned: success + type: string + sample: Debian 7.7 64-bit minimal 2015-03-19 +zone: + description: Name of zone the ISO is registered in. + returned: success + type: string + sample: zuerich +status: + description: Status of the ISO. 
+ returned: success + type: string + sample: Successfully Installed +is_ready: + description: True if the ISO is ready to be deployed from. + returned: success + type: boolean + sample: true +checksum: + description: MD5 checksum of the ISO. + returned: success + type: string + sample: 0b31bccccb048d20b551f70830bb7ad0 +created: + description: Date of registering. + returned: success + type: string + sample: 2015-03-29T14:57:06+0200 +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackIso(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.iso = None + + def register_iso(self): + iso = self.get_iso() + if not iso: + args = {} + args['zoneid'] = self.get_zone_id() + args['projectid'] = self.get_project_id() + + args['bootable'] = self.module.params.get('bootable') + args['ostypeid'] = self.get_os_type_id() + if args['bootable'] and not args['ostypeid']: + self.module.fail_json(msg="OS type 'os_type' is requried if 'bootable=true'.") + + args['url'] = self.module.params.get('url') + if not args['url']: + self.module.fail_json(msg="URL is requried.") + + args['name'] = self.module.params.get('name') + args['displaytext'] = self.module.params.get('name') + args['checksum'] = self.module.params.get('checksum') + args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable') + args['isfeatured'] = self.module.params.get('is_featured') + args['ispublic'] = self.module.params.get('is_public') + + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.registerIso(**args) + iso = res['iso'][0] + return iso + + + def get_iso(self): + if not self.iso: + args = {} + args['isready'] = self.module.params.get('is_ready') + args['isofilter'] = self.module.params.get('iso_filter') + args['projectid'] = self.get_project_id() + args['zoneid'] = self.get_zone_id() + + # if checksum is set, we only look on that. 
+ checksum = self.module.params.get('checksum') + if not checksum: + args['name'] = self.module.params.get('name') + + isos = self.cs.listIsos(**args) + if isos: + if not checksum: + self.iso = isos['iso'][0] + else: + for i in isos['iso']: + if i['checksum'] == checksum: + self.iso = i + break + return self.iso + + + def remove_iso(self): + iso = self.get_iso() + if iso: + self.result['changed'] = True + args = {} + args['id'] = iso['id'] + args['projectid'] = self.get_project_id() + args['zoneid'] = self.get_zone_id() + if not self.module.check_mode: + res = self.cs.deleteIso(**args) + return iso + + + def get_result(self, iso): + if iso: + if 'displaytext' in iso: + self.result['displaytext'] = iso['displaytext'] + if 'name' in iso: + self.result['name'] = iso['name'] + if 'zonename' in iso: + self.result['zone'] = iso['zonename'] + if 'checksum' in iso: + self.result['checksum'] = iso['checksum'] + if 'status' in iso: + self.result['status'] = iso['status'] + if 'isready' in iso: + self.result['is_ready'] = iso['isready'] + if 'created' in iso: + self.result['created'] = iso['created'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + url = dict(default=None), + os_type = dict(default=None), + zone = dict(default=None), + iso_filter = dict(default='self', choices=[ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]), + project = dict(default=None), + checksum = dict(default=None), + is_ready = dict(choices=BOOLEANS, default=False), + bootable = dict(choices=BOOLEANS, default=True), + is_featured = dict(choices=BOOLEANS, default=False), + is_dynamically_scalable = dict(choices=BOOLEANS, default=False), + state = dict(choices=['present', 'absent'], default='present'), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_iso = AnsibleCloudStackIso(module) + + state = module.params.get('state') + if state in ['absent']: + iso = acs_iso.remove_iso() + else: + iso = acs_iso.register_iso() + + result = acs_iso.get_result(iso) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 60467738edba9ea50578da38092ef7d6063495a4 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Thu, 26 Mar 2015 19:52:36 +0100 Subject: [PATCH 099/720] cloudstack: add new module cloudstack_sshkey This module depends on ansible.module_utils.cloudstack. 
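All of the cloudstack_* modules in this series build on that shared helper and follow the same idempotency pattern: look the resource up first, only call the API (and report changed) when something is missing, and skip the API call entirely in check mode. The sketch below is a simplified, standalone illustration of that pattern using a fake client in place of the cs library; FakeClient and ensure_ssh_key() are illustrative names of mine, while listSSHKeyPairs()/registerSSHKeyPair() mirror the calls used by the module that follows.

class FakeClient(object):
    """Stand-in for the 'cs' CloudStack client used by the real modules."""

    def __init__(self):
        self.keys = {}  # name -> stored keypair dict

    def listSSHKeyPairs(self, **kwargs):
        name = kwargs.get('name')
        if name in self.keys:
            return {'sshkeypair': [self.keys[name]]}
        return {}

    def registerSSHKeyPair(self, **kwargs):
        key = {'name': kwargs['name'], 'fingerprint': 'aa:bb:cc:dd'}
        self.keys[kwargs['name']] = key
        return {'keypair': key}


def ensure_ssh_key(client, name, public_key, check_mode=False):
    """Return (changed, key); never re-registers an existing key."""
    existing = client.listSSHKeyPairs(name=name)
    if existing and 'sshkeypair' in existing:
        return False, existing['sshkeypair'][0]
    if check_mode:
        return True, None
    res = client.registerSSHKeyPair(name=name, publickey=public_key)
    return True, res['keypair']


if __name__ == '__main__':
    client = FakeClient()
    print(ensure_ssh_key(client, 'linus@example.com', 'ssh-rsa AAAA...'))  # changed
    print(ensure_ssh_key(client, 'linus@example.com', 'ssh-rsa AAAA...'))  # unchanged

Running it twice shows the second call reporting changed=False, which is the behaviour the real modules rely on to keep playbooks idempotent.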
--- cloud/cloudstack/__init__.py | 0 cloud/cloudstack/cloudstack_sshkey.py | 210 ++++++++++++++++++++++++++ 2 files changed, 210 insertions(+) create mode 100644 cloud/cloudstack/__init__.py create mode 100644 cloud/cloudstack/cloudstack_sshkey.py diff --git a/cloud/cloudstack/__init__.py b/cloud/cloudstack/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cloudstack_sshkey.py new file mode 100644 index 00000000000..414ded6c971 --- /dev/null +++ b/cloud/cloudstack/cloudstack_sshkey.py @@ -0,0 +1,210 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cloudstack_sshkey +short_description: Manages SSH keys on Apache CloudStack based clouds. +description: + - If no public key is provided, a new ssh private/public key pair will be + created and the private key will be returned. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of public key. + required: true + default: null + aliases: [] + project: + description: + - Name of the project the public key to be registered in. + required: false + default: null + aliases: [] + state: + description: + - State of the public key. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + aliases: [] + public_key: + description: + - String of the public key. + required: false + default: null + aliases: [] +''' + +EXAMPLES = ''' +--- +# create a new private / public key pair: +- local_action: cloudstack_sshkey name=linus@example.com + register: key +- debug: msg='private key is {{ key.private_key }}' + +# remove a public key by its name: +- local_action: cloudstack_sshkey name=linus@example.com state=absent + +# register your existing local public key: +- local_action: cloudstack_sshkey name=linus@example.com public_key='{{ lookup('file', '~/.ssh/id_rsa.pub') }}' +''' + +RETURN = ''' +--- +name: + description: Name of the SSH public key. + returned: success + type: string + sample: linus@example.com +fingerprint: + description: Fingerprint of the SSH public key. + returned: success + type: string + sample: "86:5e:a3:e8:bd:95:7b:07:7c:c2:5c:f7:ad:8b:09:28" +private_key: + description: Private key of generated SSH keypair. 
+ returned: changed + type: string + sample: "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQCkeFYjI+4k8bWfIRMzp4pCzhlopNydbbwRu824P5ilD4ATWMUG\nvEtuCQ2Mp5k5Bma30CdYHgh2/SbxC5RxXSUKTUJtTKpoJUy8PAhb1nn9dnfkC2oU\naRVi9NRUgypTIZxMpgooHOxvAzWxbZCyh1W+91Ld3FNaGxTLqTgeevY84wIDAQAB\nAoGAcwQwgLyUwsNB1vmjWwE0QEmvHS4FlhZyahhi4hGfZvbzAxSWHIK7YUT1c8KU\n9XsThEIN8aJ3GvcoL3OAqNKRnoNb14neejVHkYRadhxqc0GVN6AUIyCqoEMpvhFI\nQrinM572ORzv5ffRjCTbvZcYlW+sqFKNo5e8pYIB8TigpFECQQDu7bg9vkvg8xPs\nkP1K+EH0vsR6vUfy+m3euXjnbJtiP7RoTkZk0JQMOmexgy1qQhISWT0e451wd62v\nJ7M0trl5AkEAsDivJnMIlCCCypwPN4tdNUYpe9dtidR1zLmb3SA7wXk5xMUgLZI9\ncWPjBCMt0KKShdDhQ+hjXAyKQLF7iAPuOwJABjdHCMwvmy2XwhrPjCjDRoPEBtFv\n0sFzJE08+QBZVogDwIbwy+SlRWArnHGmN9J6N+H8dhZD3U4vxZPJ1MBAOQJBAJxO\nCv1dt1Q76gbwmYa49LnWO+F+2cgRTVODpr5iYt5fOmBQQRRqzFkRMkFvOqn+KVzM\nQ6LKM6dn8BEl295vLhUCQQCVDWzoSk3GjL3sOjfAUTyAj8VAXM69llaptxWWySPM\nE9pA+8rYmHfohYFx7FD5/KWCO+sfmxTNB48X0uwyE8tO\n-----END RSA PRIVATE KEY-----\n" +''' + + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +from ansible.module_utils.cloudstack import * + +class AnsibleCloudStackSshKey(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.ssh_key = None + + + def register_ssh_key(self): + ssh_key = self.get_ssh_key() + if not ssh_key: + self.result['changed'] = True + args = {} + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + args['publickey'] = self.module.params.get('public_key') + if not self.module.check_mode: + ssh_key = self.cs.registerSSHKeyPair(**args) + return ssh_key + + + def create_ssh_key(self): + ssh_key = self.get_ssh_key() + if not ssh_key: + self.result['changed'] = True + args = {} + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + if not self.module.check_mode: + res = self.cs.createSSHKeyPair(**args) + ssh_key = res['keypair'] + return ssh_key + + + def remove_ssh_key(self): + ssh_key = self.get_ssh_key() + if ssh_key: + self.result['changed'] = True + args = {} + args['name'] = self.module.params.get('name') + if not self.module.check_mode: + res = self.cs.deleteSSHKeyPair(**args) + return ssh_key + + + def get_ssh_key(self): + if not self.ssh_key: + args = {} + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + + ssh_keys = self.cs.listSSHKeyPairs(**args) + if ssh_keys and 'sshkeypair' in ssh_keys: + self.ssh_key = ssh_keys['sshkeypair'][0] + return self.ssh_key + + + def get_result(self, ssh_key): + if ssh_key: + if 'fingerprint' in ssh_key: + self.result['fingerprint'] = ssh_key['fingerprint'] + + if 'name' in ssh_key: + self.result['name'] = ssh_key['name'] + + if 'privatekey' in ssh_key: + self.result['private_key'] = ssh_key['privatekey'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + public_key = dict(default=None), + project = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_sshkey = AnsibleCloudStackSshKey(module) + state = module.params.get('state') + if state in ['absent']: + 
ssh_key = acs_sshkey.remove_ssh_key() + else: + if module.params.get('public_key'): + ssh_key = acs_sshkey.register_ssh_key() + else: + ssh_key = acs_sshkey.create_ssh_key() + + result = acs_sshkey.get_result(ssh_key) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 82e25447adeab1c7d464e64119b594c5386506f8 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 28 Mar 2015 10:58:02 +0100 Subject: [PATCH 100/720] cloudstack_ssh: fix missing projectid if state=absent --- cloud/cloudstack/cloudstack_sshkey.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cloudstack_sshkey.py index 414ded6c971..97d6a222f09 100644 --- a/cloud/cloudstack/cloudstack_sshkey.py +++ b/cloud/cloudstack/cloudstack_sshkey.py @@ -139,6 +139,7 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): self.result['changed'] = True args = {} args['name'] = self.module.params.get('name') + args['projectid'] = self.get_project_id() if not self.module.check_mode: res = self.cs.deleteSSHKeyPair(**args) return ssh_key From bf32de8d8f705692662108759c5d2077baf6ddba Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 28 Mar 2015 22:07:39 +0100 Subject: [PATCH 101/720] cloudstack_ssh: register_ssh_key() set public_key as param --- cloud/cloudstack/cloudstack_sshkey.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cloudstack_sshkey.py index 97d6a222f09..589e2783913 100644 --- a/cloud/cloudstack/cloudstack_sshkey.py +++ b/cloud/cloudstack/cloudstack_sshkey.py @@ -107,14 +107,14 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): self.ssh_key = None - def register_ssh_key(self): + def register_ssh_key(self, public_key): ssh_key = self.get_ssh_key() if not ssh_key: self.result['changed'] = True args = {} args['projectid'] = self.get_project_id() args['name'] = self.module.params.get('name') - args['publickey'] = self.module.params.get('public_key') + args['publickey'] = public_key if not self.module.check_mode: ssh_key = self.cs.registerSSHKeyPair(**args) return ssh_key @@ -194,8 +194,9 @@ def main(): if state in ['absent']: ssh_key = acs_sshkey.remove_ssh_key() else: - if module.params.get('public_key'): - ssh_key = acs_sshkey.register_ssh_key() + public_key = module.params.get('public_key') + if public_key: + ssh_key = acs_sshkey.register_ssh_key(public_key) else: ssh_key = acs_sshkey.create_ssh_key() From a24d691419c41f0e64b95d3560237f3829340917 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 28 Mar 2015 22:09:21 +0100 Subject: [PATCH 102/720] cloudstack_ssh: update description --- cloud/cloudstack/cloudstack_sshkey.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cloudstack_sshkey.py index 589e2783913..7e803be02e5 100644 --- a/cloud/cloudstack/cloudstack_sshkey.py +++ b/cloud/cloudstack/cloudstack_sshkey.py @@ -23,8 +23,8 @@ DOCUMENTATION = ''' module: cloudstack_sshkey short_description: Manages SSH keys on Apache CloudStack based clouds. description: - - If no public key is provided, a new ssh private/public key pair will be - created and the private key will be returned. + - If no key was found and no public key was provided and a new SSH + private/public key pair will be created and the private key will be returned. 
version_added: '2.0' author: René Moser options: From c03baa7ec64a6ae6acbd4176c24d1e757b88c42b Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 28 Mar 2015 22:12:19 +0100 Subject: [PATCH 103/720] cloudstack_ssh: replace ssh public key if fingerprints do not match --- cloud/cloudstack/cloudstack_sshkey.py | 38 ++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cloudstack_sshkey.py index 7e803be02e5..4f63a9d566b 100644 --- a/cloud/cloudstack/cloudstack_sshkey.py +++ b/cloud/cloudstack/cloudstack_sshkey.py @@ -95,6 +95,12 @@ try: except ImportError: has_lib_cs = False +try: + import sshpubkeys + has_lib_sshpubkeys = True +except ImportError: + has_lib_sshpubkeys = False + from ansible.module_utils.cloudstack import * class AnsibleCloudStackSshKey(AnsibleCloudStack): @@ -109,14 +115,30 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): def register_ssh_key(self, public_key): ssh_key = self.get_ssh_key() + + args = {} + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + + res = None if not ssh_key: self.result['changed'] = True - args = {} - args['projectid'] = self.get_project_id() - args['name'] = self.module.params.get('name') args['publickey'] = public_key if not self.module.check_mode: - ssh_key = self.cs.registerSSHKeyPair(**args) + res = self.cs.registerSSHKeyPair(**args) + + else: + fingerprint = self._get_ssh_fingerprint(public_key) + if ssh_key['fingerprint'] != fingerprint: + self.result['changed'] = True + if not self.module.check_mode: + self.cs.deleteSSHKeyPair(**args) + args['publickey'] = public_key + res = self.cs.registerSSHKeyPair(**args) + + if res and 'keypair' in res: + ssh_key = res['keypair'] + return ssh_key @@ -170,6 +192,11 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): return self.result + def _get_ssh_fingerprint(self, public_key): + key = sshpubkeys.SSHKey(public_key) + return key.hash() + + def main(): module = AnsibleModule( argument_spec = dict( @@ -188,6 +215,9 @@ def main(): if not has_lib_cs: module.fail_json(msg="python library cs required: pip install cs") + if not has_lib_sshpubkeys: + module.fail_json(msg="python library sshpubkeys required: pip install sshpubkeys") + try: acs_sshkey = AnsibleCloudStackSshKey(module) state = module.params.get('state') From 392feaea63f845f53c02f70ea4a7dd3e723f3ed9 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 31 Mar 2015 11:55:39 +0200 Subject: [PATCH 104/720] cloudstack_sshkey: cleanup docs --- cloud/cloudstack/cloudstack_sshkey.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cloudstack_sshkey.py index 4f63a9d566b..657e367fefe 100644 --- a/cloud/cloudstack/cloudstack_sshkey.py +++ b/cloud/cloudstack/cloudstack_sshkey.py @@ -32,27 +32,22 @@ options: description: - Name of public key. required: true - default: null - aliases: [] project: description: - Name of the project the public key to be registered in. required: false default: null - aliases: [] state: description: - State of the public key. required: false default: 'present' choices: [ 'present', 'absent' ] - aliases: [] public_key: description: - String of the public key. 
required: false default: null - aliases: [] ''' EXAMPLES = ''' From 39cff86e7b8161ac1ed6316059dd50965a2f1e1d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 09:33:59 -0400 Subject: [PATCH 105/720] fixed doc issues --- monitoring/zabbix_host.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 1c900a894e4..c7b8e52b9e7 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -74,8 +74,8 @@ options: interfaces: description: - List of interfaces to be created for the host (see example below). - - Available values are: dns, ip, main, port, type and useip. - - Please review the interface documentation for more information on the supported properties: + - 'Available values are: dns, ip, main, port, type and useip.' + - Please review the interface documentation for more information on the supported properties - https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface required: false default: [] From 1754c7a1cad03fdc359823d94b1627039bea45f9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 09:36:51 -0400 Subject: [PATCH 106/720] fixed doc issues on zabbix_screen --- monitoring/zabbix_screen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py index 4c58c32d47e..ada2b1c6ab0 100644 --- a/monitoring/zabbix_screen.py +++ b/monitoring/zabbix_screen.py @@ -54,7 +54,7 @@ options: - If the screen(s) already been added, the screen(s) name won't be updated. - When creating or updating screen(s), the screen_name, host_group are required. - When deleting screen(s), the screen_name is required. - - The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated as needed. + - 'The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated as needed.' required: true notes: - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed. From eb04e45311683dba1d54c8e5db293a2d3877eb68 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 09:39:27 -0400 Subject: [PATCH 107/720] fixed doc issues with cloudstack_fw --- cloud/cloudstack/cloudstack_fw.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/cloud/cloudstack/cloudstack_fw.py b/cloud/cloudstack/cloudstack_fw.py index 0014f433c47..cb60c1cde64 100644 --- a/cloud/cloudstack/cloudstack_fw.py +++ b/cloud/cloudstack/cloudstack_fw.py @@ -45,23 +45,23 @@ options: description: - CIDR (full notation) to be used for firewall rule. required: false - default: '0.0.0.0\0' - start_port + default: '0.0.0.0/0' + start_port: description: - Start port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). required: false default: null - end_port + end_port: description: - End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). required: false default: null - icmp_type + icmp_type: description: - Type of the icmp message being sent. Considered if C(protocol=icmp). required: false default: null - icmp_code + icmp_code: description: - Error code for this icmp message. Considered if C(protocol=icmp). 
required: false @@ -106,9 +106,6 @@ EXAMPLES = ''' state: absent ''' -RETURN = ''' -''' - try: from cs import CloudStack, CloudStackException, read_config has_lib_cs = True From 759e618c4ca94295e612e94c23193331fd2a1006 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 10:44:34 -0400 Subject: [PATCH 108/720] vertica doc fixes --- database/vertica/vertica_configuration.py | 36 ++++++------- database/vertica/vertica_facts.py | 26 +++++----- database/vertica/vertica_role.py | 39 +++++++------- database/vertica/vertica_schema.py | 55 ++++++++++---------- database/vertica/vertica_user.py | 63 +++++++++++------------ 5 files changed, 103 insertions(+), 116 deletions(-) diff --git a/database/vertica/vertica_configuration.py b/database/vertica/vertica_configuration.py index c7bdb1001d6..ad74c0f23f2 100644 --- a/database/vertica/vertica_configuration.py +++ b/database/vertica/vertica_configuration.py @@ -22,59 +22,55 @@ module: vertica_configuration version_added: '2.0' short_description: Updates Vertica configuration parameters. description: - Updates Vertica configuration parameters. + - Updates Vertica configuration parameters. options: name: description: - Name of the parameter to update. + - Name of the parameter to update. required: true - default: null value: description: - Value of the parameter to be set. + - Value of the parameter to be set. required: true - default: null db: description: - Name of the Vertica database. + - Name of the Vertica database. required: false default: null cluster: description: - Name of the Vertica cluster. + - Name of the Vertica cluster. required: false default: localhost port: description: - Vertica cluster port to connect to. + - Vertica cluster port to connect to. required: false default: 5433 login_user: description: - The username used to authenticate with. + - The username used to authenticate with. required: false default: dbadmin login_password: description: - The password used to authenticate with. + - The password used to authenticate with. required: false default: null notes: - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
requirements: [ 'unixODBC', 'pyodbc' ] author: Dariusz Owczarek """ EXAMPLES = """ -Examples: - - name: updating load_balance_policy vertica_configuration: name=failovertostandbyafter value='8 hours' """ diff --git a/database/vertica/vertica_facts.py b/database/vertica/vertica_facts.py index 4b963a4e377..b7e0ac4ad5a 100644 --- a/database/vertica/vertica_facts.py +++ b/database/vertica/vertica_facts.py @@ -22,11 +22,11 @@ module: vertica_facts version_added: '2.0' short_description: Gathers Vertica database facts. description: - Gathers Vertica database facts. + - Gathers Vertica database facts. options: cluster: description: - Name of the cluster running the schema. + - Name of the cluster running the schema. required: false default: localhost port: @@ -36,28 +36,28 @@ options: default: 5433 db: description: - Name of the database running the schema. + - Name of the database running the schema. required: false default: null login_user: description: - The username used to authenticate with. + - The username used to authenticate with. required: false default: dbadmin login_password: description: - The password used to authenticate with. + - The password used to authenticate with. required: false default: null notes: - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] author: Dariusz Owczarek """ diff --git a/database/vertica/vertica_role.py b/database/vertica/vertica_role.py index 825bb1b07e9..ef56a58a866 100644 --- a/database/vertica/vertica_role.py +++ b/database/vertica/vertica_role.py @@ -22,66 +22,63 @@ module: vertica_role version_added: '2.0' short_description: Adds or removes Vertica database roles and assigns roles to them. description: - Adds or removes Vertica database role and, optionally, assign other roles. + - Adds or removes Vertica database role and, optionally, assign other roles. options: name: description: - Name of the role to add or remove. + - Name of the role to add or remove. required: true - default: null assigned_roles: description: - Comma separated list of roles to assign to the role. - [Alias I(assigned_role)] + - Comma separated list of roles to assign to the role. 
+ aliases: ['assigned_role'] required: false default: null state: description: - Whether to create C(present), drop C(absent) or lock C(locked) a role. + - Whether to create C(present), drop C(absent) or lock C(locked) a role. required: false choices: ['present', 'absent'] default: present db: description: - Name of the Vertica database. + - Name of the Vertica database. required: false default: null cluster: description: - Name of the Vertica cluster. + - Name of the Vertica cluster. required: false default: localhost port: description: - Vertica cluster port to connect to. + - Vertica cluster port to connect to. required: false default: 5433 login_user: description: - The username used to authenticate with. + - The username used to authenticate with. required: false default: dbadmin login_password: description: - The password used to authenticate with. + - The password used to authenticate with. required: false default: null notes: - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] author: Dariusz Owczarek """ EXAMPLES = """ -Examples: - - name: creating a new vertica role vertica_role: name=role_name db=db_name state=present diff --git a/database/vertica/vertica_schema.py b/database/vertica/vertica_schema.py index f3a75055d06..d0ed2ce05b0 100644 --- a/database/vertica/vertica_schema.py +++ b/database/vertica/vertica_schema.py @@ -22,82 +22,79 @@ module: vertica_schema version_added: '2.0' short_description: Adds or removes Vertica database schema and roles. description: - Adds or removes Vertica database schema and, optionally, roles - with schema access privileges. - A schema will not be removed until all the objects have been dropped. - In such a situation, if the module tries to remove the schema it - will fail and only remove roles created for the schema if they have - no dependencies. + - Adds or removes Vertica database schema and, optionally, roles + with schema access privileges. + - A schema will not be removed until all the objects have been dropped. + - In such a situation, if the module tries to remove the schema it + will fail and only remove roles created for the schema if they have + no dependencies. options: name: description: - Name of the schema to add or remove. 
+ - Name of the schema to add or remove. required: true - default: null usage_roles: description: - Comma separated list of roles to create and grant usage access to the schema. - [Alias I(usage_role)] + - Comma separated list of roles to create and grant usage access to the schema. + aliases: ['usage_role'] required: false default: null create_roles: description: - Comma separated list of roles to create and grant usage and create access to the schema. - [Alias I(create_role)] + - Comma separated list of roles to create and grant usage and create access to the schema. + aliases: ['create_role'] required: false default: null owner: description: - Name of the user to set as owner of the schema. + - Name of the user to set as owner of the schema. required: false default: null state: description: - Whether to create C(present), or drop C(absent) a schema. + - Whether to create C(present), or drop C(absent) a schema. required: false default: present choices: ['present', 'absent'] db: description: - Name of the Vertica database. + - Name of the Vertica database. required: false default: null cluster: description: - Name of the Vertica cluster. + - Name of the Vertica cluster. required: false default: localhost port: description: - Vertica cluster port to connect to. + - Vertica cluster port to connect to. required: false default: 5433 login_user: description: - The username used to authenticate with. + - The username used to authenticate with. required: false default: dbadmin login_password: description: - The password used to authenticate with. + - The password used to authenticate with. required: false default: null notes: - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] author: Dariusz Owczarek """ EXAMPLES = """ -Examples: - - name: creating a new vertica schema vertica_schema: name=schema_name db=db_name state=present diff --git a/database/vertica/vertica_user.py b/database/vertica/vertica_user.py index 1d72deca617..a011bf35adb 100644 --- a/database/vertica/vertica_user.py +++ b/database/vertica/vertica_user.py @@ -22,98 +22,95 @@ module: vertica_user version_added: '2.0' short_description: Adds or removes Vertica database users and assigns roles. 
description: - Adds or removes Vertica database user and, optionally, assigns roles. - A user will not be removed until all the dependencies have been dropped. - In such a situation, if the module tries to remove the user it - will fail and only remove roles granted to the user. + - Adds or removes Vertica database user and, optionally, assigns roles. + - A user will not be removed until all the dependencies have been dropped. + - In such a situation, if the module tries to remove the user it + will fail and only remove roles granted to the user. options: name: description: - Name of the user to add or remove. + - Name of the user to add or remove. required: true - default: null profile: description: - Sets the user's profile. + - Sets the user's profile. required: false default: null resource_pool: description: - Sets the user's resource pool. + - Sets the user's resource pool. required: false default: null password: description: - The user's password encrypted by the MD5 algorithm. - The password must be generated with the format C("md5" + md5[password + username]), - resulting in a total of 35 characters. An easy way to do this is by querying - the Vertica database with select 'md5'||md5(''). + - The user's password encrypted by the MD5 algorithm. + - The password must be generated with the format C("md5" + md5[password + username]), + resulting in a total of 35 characters. An easy way to do this is by querying + the Vertica database with select 'md5'||md5(''). required: false default: null expired: description: - Sets the user's password expiration. + - Sets the user's password expiration. required: false default: null ldap: description: - Set to true if users are authenticated via LDAP. - The user will be created with password expired and set to I($ldap$). + - Set to true if users are authenticated via LDAP. + - The user will be created with password expired and set to I($ldap$). required: false default: null roles: description: - Comma separated list of roles to assign to the user. - [Alias I(role)] + - Comma separated list of roles to assign to the user. + aliases: ['role'] required: false default: null state: description: - Whether to create C(present), drop C(absent) or lock C(locked) a user. + - Whether to create C(present), drop C(absent) or lock C(locked) a user. required: false choices: ['present', 'absent', 'locked'] default: present db: description: - Name of the Vertica database. + - Name of the Vertica database. required: false default: null cluster: description: - Name of the Vertica cluster. + - Name of the Vertica cluster. required: false default: localhost port: description: - Vertica cluster port to connect to. + - Vertica cluster port to connect to. required: false default: 5433 login_user: description: - The username used to authenticate with. + - The username used to authenticate with. required: false default: dbadmin login_password: description: - The password used to authenticate with. + - The password used to authenticate with. required: false default: null notes: - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. 
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] author: Dariusz Owczarek """ EXAMPLES = """ -Examples: - - name: creating a new vertica user with password vertica_user: name=user_name password=md5 db=db_name state=present From 30cf73e83a7a9d13faf88f5527581c11f605f317 Mon Sep 17 00:00:00 2001 From: Matthew Vernon Date: Tue, 31 Mar 2015 17:19:11 +0100 Subject: [PATCH 109/720] Fix for issue #353 (handle change in ssh-keygen behaviour) Prior to openssh 6.4, ssh-keygen -F returned 0 (and no output) when no host was found. After then, it instead returned 1 and no output. This revised code behaves correctly with either behaviour. There is currently no other code path that results in exit(1) and no output. --- system/known_hosts.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/system/known_hosts.py b/system/known_hosts.py index 893eca3dcb7..c2030758cc8 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -188,10 +188,14 @@ def search_for_host_key(module,host,key,path,sshkeygen): replace=False if os.path.exists(path)==False: return False, False + #openssh >=6.4 has changed ssh-keygen behaviour such that it returns + #1 if no host is found, whereas previously it returned 0 rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,'-f',path], - check_rc=True) - if stdout=='': #host not found - return False, False + check_rc=False) + if stdout=='' and stderr=='' and (rc==0 or rc==1): + return False, False #host not found, no other errors + if rc!=0: #something went wrong + module.fail_json(msg="ssh-keygen failed (rc=%d,stdout='%s',stderr='%s')" % (rc,stdout,stderr)) #If user supplied no key, we don't want to try and replace anything with it if key is None: From f901fd0160309d9fc2c84e849d34846fe94b35c8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 14:29:38 -0400 Subject: [PATCH 110/720] glusterbin needs to be global --- system/gluster_volume.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 1669dddb81c..e04df48d5f4 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -130,8 +130,10 @@ import shutil import time import socket +glusterbin = '' def run_gluster(gargs, **kwargs): + global glusterbin args = [glusterbin] args.extend(gargs) try: @@ -143,6 +145,7 @@ def run_gluster(gargs, **kwargs): return out def run_gluster_nofail(gargs, **kwargs): + global glusterbin args = [glusterbin] args.extend(gargs) rc, out, err = module.run_command(args, 
**kwargs) @@ -151,6 +154,7 @@ def run_gluster_nofail(gargs, **kwargs): return out def run_gluster_yes(gargs): + global glusterbin args = [glusterbin] args.extend(gargs) rc, out, err = module.run_command(args, data='y\n') @@ -312,6 +316,7 @@ def main(): ) ) + global glusterbin glusterbin = module.get_bin_path('gluster', True) changed = False From e5f4aa4dea1505e520eb62e24b90b64c81605ef9 Mon Sep 17 00:00:00 2001 From: Solomon Gifford Date: Tue, 31 Mar 2015 16:43:40 -0400 Subject: [PATCH 111/720] \login_password with missing login_user not caught #363 --- database/misc/mongodb_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index 3a3cf4dfff1..ecf8b33b607 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -222,7 +222,7 @@ def main(): if mongocnf_creds is not False: login_user = mongocnf_creds['user'] login_password = mongocnf_creds['password'] - elif login_password is None and login_user is not None: + elif login_password is None or login_user is None: module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') if login_user is not None and login_password is not None: From 42e761df4a37be65f93ef729ccc59739e2123a20 Mon Sep 17 00:00:00 2001 From: Joe Ray Date: Wed, 1 Apr 2015 17:17:52 +0100 Subject: [PATCH 112/720] Import boto.ec2 in sns to allow boto profiles to be used --- notification/sns.py | 1 + 1 file changed, 1 insertion(+) diff --git a/notification/sns.py b/notification/sns.py index f2ed178554e..54421b0e9fa 100644 --- a/notification/sns.py +++ b/notification/sns.py @@ -105,6 +105,7 @@ from ansible.module_utils.ec2 import * try: import boto + import boto.ec2 import boto.sns except ImportError: print "failed=True msg='boto required for this module'" From 89284bcce012931340ee5212049f92795c90718f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 1 Apr 2015 20:44:28 -0400 Subject: [PATCH 113/720] now also captrure OSErrors on temp file fixes #360 --- system/known_hosts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/known_hosts.py b/system/known_hosts.py index c2030758cc8..b332528ed19 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -137,11 +137,11 @@ def enforce_state(module, params): outf.write(key) outf.close() module.atomic_move(outf.name,path) - except IOError,e: + except (IOError,OSError),e: module.fail_json(msg="Failed to write to file %s: %s" % \ (path,str(e))) params['changed'] = True - + return params def sanity_check(module,host,key,sshkeygen): From 34312759213fb003438e89c7ce48aa26b329eea3 Mon Sep 17 00:00:00 2001 From: Andy Hill Date: Thu, 2 Apr 2015 13:44:07 -0400 Subject: [PATCH 114/720] bigip_facts: Add missing "device" option The device option was already implemented but omitted from docs and allowed choices. With the addition of device, a devices failover_state can be determined. 
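The validation the patch below touches is a simple whitelist check: every requested fact section must be one of the known include names before any iControl calls are made. Here is a rough standalone sketch of that check with 'device' now part of the list; check_includes() and the error wording are illustrative, not the module's exact code.

VALID_INCLUDES = ('address_class', 'certificate', 'client_ssl_profile',
                  'device', 'device_group', 'interface', 'key', 'node',
                  'pool', 'rule', 'self_ip', 'software', 'system_info',
                  'traffic_group', 'trunk', 'virtual_address',
                  'virtual_server', 'vlan')


def check_includes(include):
    """Lower-case the requested sections and reject anything unknown."""
    include = [x.lower() for x in include]
    invalid = [x for x in include if x not in VALID_INCLUDES]
    if invalid:
        raise ValueError("include must be one or more of: %s (got: %s)"
                         % (", ".join(VALID_INCLUDES), ", ".join(invalid)))
    return include


if __name__ == '__main__':
    print(check_includes(['device', 'system_info']))  # 'device' is now accepted
    try:
        check_includes(['bogus'])
    except ValueError as err:
        print(err)

A list comprehension is used here in place of the module's map/lambda pair; it reads the same way and also collects the invalid names for the error message.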
--- network/f5/bigip_facts.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 99a1e31de68..866119e94c3 100755 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -70,8 +70,8 @@ options: required: true default: null choices: ['address_class', 'certificate', 'client_ssl_profile', - 'device_group', 'interface', 'key', 'node', 'pool', 'rule', - 'self_ip', 'software', 'system_info', 'traffic_group', + 'device', 'device_group', 'interface', 'key', 'node', 'pool', + 'rule', 'self_ip', 'software', 'system_info', 'traffic_group', 'trunk', 'virtual_address', 'virtual_server', 'vlan'] aliases: [] filter: @@ -1593,8 +1593,8 @@ def main(): regex = None include = map(lambda x: x.lower(), module.params['include']) valid_includes = ('address_class', 'certificate', 'client_ssl_profile', - 'device_group', 'interface', 'key', 'node', 'pool', - 'rule', 'self_ip', 'software', 'system_info', + 'device', 'device_group', 'interface', 'key', 'node', + 'pool', 'rule', 'self_ip', 'software', 'system_info', 'traffic_group', 'trunk', 'virtual_address', 'virtual_server', 'vlan') include_test = map(lambda x: x in valid_includes, include) From 44280e461cdc9ecaf2696a0f076bbdde807b6e02 Mon Sep 17 00:00:00 2001 From: Gregory Haynes Date: Wed, 1 Apr 2015 11:44:05 -0700 Subject: [PATCH 115/720] Add type property to zypper To install patterns and other package types a type parameter needs to be specified to zypper. --- packaging/os/zypper.py | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index a6fdc5e7189..5daec8d1429 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -50,6 +50,12 @@ options: required: false choices: [ present, latest, absent ] default: "present" + type: + description: + - The type of package to be operated on. + required: false + choices: [ package, patch, pattern, product, srcpackage ] + default: "package" disable_gpg_check: description: - Whether to disable to GPG signature checking of the package @@ -148,7 +154,7 @@ def get_package_state(m, packages): return installed_state # Function used to make sure a package is present. -def package_present(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper): +def package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper): packages = [] for package in name: if installed_state[package] is False: @@ -158,7 +164,7 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recomme # add global options before zypper command if disable_gpg_check: cmd.append('--no-gpg-checks') - cmd.extend(['install', '--auto-agree-with-licenses']) + cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type]) # add install parameter if disable_recommends and not old_zypper: cmd.append('--no-recommends') @@ -178,10 +184,10 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recomme return (rc, stdout, stderr, changed) # Function used to make sure a package is the latest available version. 
-def package_latest(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper): +def package_latest(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper): # first of all, make sure all the packages are installed - (rc, stdout, stderr, changed) = package_present(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper) + (rc, stdout, stderr, changed) = package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper) # if we've already made a change, we don't have to check whether a version changed if not changed: @@ -193,9 +199,9 @@ def package_latest(m, name, installed_state, disable_gpg_check, disable_recommen cmd.append('--no-gpg-checks') if old_zypper: - cmd.extend(['install', '--auto-agree-with-licenses']) + cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type]) else: - cmd.extend(['update', '--auto-agree-with-licenses']) + cmd.extend(['update', '--auto-agree-with-licenses', '-t', package_type]) cmd.extend(name) rc, stdout, stderr = m.run_command(cmd, check_rc=False) @@ -209,13 +215,13 @@ def package_latest(m, name, installed_state, disable_gpg_check, disable_recommen return (rc, stdout, stderr, changed) # Function used to make sure a package is not installed. -def package_absent(m, name, installed_state, old_zypper): +def package_absent(m, name, installed_state, package_type, old_zypper): packages = [] for package in name: if installed_state[package] is True: packages.append(package) if len(packages) != 0: - cmd = ['/usr/bin/zypper', '--non-interactive', 'remove'] + cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', '-t', package_type] cmd.extend(packages) rc, stdout, stderr = m.run_command(cmd) @@ -239,6 +245,7 @@ def main(): argument_spec = dict( name = dict(required=True, aliases=['pkg'], type='list'), state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), + type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage']), disable_gpg_check = dict(required=False, default='no', type='bool'), disable_recommends = dict(required=False, default='yes', type='bool'), ), @@ -250,6 +257,7 @@ def main(): name = params['name'] state = params['state'] + type_ = params['type'] disable_gpg_check = params['disable_gpg_check'] disable_recommends = params['disable_recommends'] @@ -272,11 +280,11 @@ def main(): # Perform requested action if state in ['installed', 'present']: - (rc, stdout, stderr, changed) = package_present(module, name, installed_state, disable_gpg_check, disable_recommends, old_zypper) + (rc, stdout, stderr, changed) = package_present(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper) elif state in ['absent', 'removed']: - (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, old_zypper) + (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, type_, old_zypper) elif state == 'latest': - (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, disable_gpg_check, disable_recommends, old_zypper) + (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper) if rc != 0: if stderr: From 7c9217a8f780b3617847891df2cd333915386bea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Wed, 8 Apr 2015 18:33:40 +0200 Subject: [PATCH 116/720] [lldp] 
Merge wrapped lines. Some devices return their description on multiple lines such as: lldp.eth0.chassis.descr=cisco CISCO7609-S running on Cisco IOS Software, c7600s72033_rp Software (c7600s72033_rp-IPSERVICESK9-M), Version 12.2(33)SRE3, RELEASE SOFTWARE (fc1) Technical Support: http://www.cisco.com/techsupport Copyright (c) 1986-2011 by Cisco Systems, Inc. Compiled Wed 26-Jan-11 06:54 by prod_rel_team The generated fact will result as: "descr": "cisco CISCO7609-S running on" This patch fixes the line wrapping to return the full description handling line breaks: "descr": "cisco CISCO7609-S running on\nCisco IOS Software, c7600s72033_rp Software (c7600s72033_rp-IPSERVICESK9-M), Version 12.2(33)SRE3, RELEASE SOFTWARE (fc1)\nTechnical Support: http://www.cisco.com/techsupport\nCopyright (c) 1986-2011 by Cisco Systems, Inc.\nCompiled Wed 26-Jan-11 06:54 by prod_rel_team" --- network/lldp.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/network/lldp.py b/network/lldp.py index d30fa5d9a60..ea6dc78d7bc 100755 --- a/network/lldp.py +++ b/network/lldp.py @@ -58,6 +58,8 @@ def gather_lldp(): path, value = entry.strip().split("=", 1) path = path.split(".") path_components, final = path[:-1], path[-1] + else: + value = current_dict[final] + '\n' + entry current_dict = output_dict for path_component in path_components: From 1f5e243acf2534c112c0f3fd01f67110635264ad Mon Sep 17 00:00:00 2001 From: mcameron Date: Wed, 8 Apr 2015 17:33:04 +0100 Subject: [PATCH 117/720] Revert "system/lvol: Suppress prompts from lvcreate" This reverts commit f8d04bec1bbdfb0e61e6d3255b16b5bfe23b42f1. --- system/lvol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index b14fd33c8e4..d9be9e7dc70 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -187,7 +187,7 @@ def main(): changed = True else: lvcreate_cmd = module.get_bin_path("lvcreate", required=True) - rc, _, err = module.run_command("%s --yes -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) + rc, _, err = module.run_command("%s -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) if rc == 0: changed = True else: From 8e24529970c13738d9f397dc7da8204ab3eec97c Mon Sep 17 00:00:00 2001 From: Simon Olofsson Date: Wed, 11 Feb 2015 23:24:36 +0100 Subject: [PATCH 118/720] homebrew: Package name is not required. e.g. `- homebrew: update_homebrew=yes upgrade_all=yes' is a valid task. 
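For the lldp change above (PATCH 116), the wrapped-line handling is easiest to see in isolation: lines of lldpctl key=value output that do not start a new key are appended to the previously parsed value with a newline instead of being dropped. The following standalone sketch reproduces that behaviour on the example from the commit message; parse_keyvalue() and the '=' test are simplifications of mine, not the module's exact code, and only the append-with-newline logic mirrors the patch.

SAMPLE = (
    "lldp.eth0.chassis.descr=cisco CISCO7609-S running on\n"
    "Cisco IOS Software, c7600s72033_rp Software\n"
    "lldp.eth0.chassis.name=core1"
)


def parse_keyvalue(text):
    """Parse key=value lines into nested dicts, merging wrapped lines."""
    output = {}
    path_components, final = [], None
    for entry in text.split("\n"):
        if "=" in entry:
            path, value = entry.strip().split("=", 1)
            path = path.split(".")
            path_components, final = path[:-1], path[-1]
        else:
            # continuation line: append to the value parsed just before it
            parent = output
            for component in path_components:
                parent = parent[component]
            value = parent[final] + "\n" + entry
        current = output
        for component in path_components:
            current = current.setdefault(component, {})
        current[final] = value
    return output


if __name__ == '__main__':
    facts = parse_keyvalue(SAMPLE)
    print(facts['lldp']['eth0']['chassis']['descr'])

Feeding it the wrapped Cisco description yields a single descr value containing embedded newlines, matching the corrected fact output described in that commit message.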
--- packaging/os/homebrew.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index 2ecac0c4ace..aac4efd827e 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -31,7 +31,8 @@ options: name: description: - name of package to install/remove - required: true + required: false + default: None state: description: - state of the package @@ -48,7 +49,7 @@ options: description: - upgrade all homebrew packages required: false - default: no + default: "no" choices: [ "yes", "no" ] install_options: description: From 6121af6b8ec26ebcd8e67da3f78819365fd61c1c Mon Sep 17 00:00:00 2001 From: Solomon Gifford Date: Thu, 9 Apr 2015 14:03:14 -0400 Subject: [PATCH 119/720] fixes issue #362 --- database/misc/mongodb_user.py | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index ecf8b33b607..d8b98f595eb 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -134,7 +134,15 @@ else: # MongoDB module specific support methods. # +def user_find(client, user): + for mongo_user in client["admin"].system.users.find(): + if mongo_user['user'] == user: + return mongo_user + return False + def user_add(module, client, db_name, user, password, roles): + #pymono's user_add is a _create_or_update_user so we won't know if it was changed or updated + #without reproducing a lot of the logic in database.py of pymongo db = client[db_name] if roles is None: db.add_user(user, password, False) @@ -147,9 +155,13 @@ def user_add(module, client, db_name, user, password, roles): err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)' module.fail_json(msg=err_msg) -def user_remove(client, db_name, user): - db = client[db_name] - db.remove_user(user) +def user_remove(module, client, db_name, user): + exists = user_find(client, user) + if exists: + db = client[db_name] + db.remove_user(user) + else: + module.exit_json(changed=False, user=user) def load_mongocnf(): config = ConfigParser.RawConfigParser() @@ -208,15 +220,6 @@ def main(): else: client = MongoClient(login_host, int(login_port), ssl=ssl) - # try to authenticate as a target user to check if it already exists - try: - client[db_name].authenticate(user, password) - if state == 'present': - module.exit_json(changed=False, user=user) - except OperationFailure: - if state == 'absent': - module.exit_json(changed=False, user=user) - if login_user is None and login_password is None: mongocnf_creds = load_mongocnf() if mongocnf_creds is not False: @@ -227,6 +230,10 @@ def main(): if login_user is not None and login_password is not None: client.admin.authenticate(login_user, login_password) + elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'): + if db_name != "admin": + module.fail_json(msg='The localhost login exception only allows the first admin account to be created') + #else: this has to be the first admin user added except ConnectionFailure, e: module.fail_json(msg='unable to connect to database: %s' % str(e)) @@ -242,7 +249,7 @@ def main(): elif state == 'absent': try: - user_remove(client, db_name, user) + user_remove(module, client, db_name, user) except OperationFailure, e: module.fail_json(msg='Unable to remove user: %s' % str(e)) From 70ae77a365d954ed2cbf08947f165917e9ae1a37 Mon Sep 17 00:00:00 2001 From: Solomon Gifford Date: Thu, 9 Apr 2015 14:22:24 -0400 Subject: [PATCH 120/720] #364 Added 
support for update_password=dict(default="always", choices=["always", "on_create"]) --- database/misc/mongodb_user.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index d8b98f595eb..10cf62cd9a0 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -87,6 +87,14 @@ options: required: false default: present choices: [ "present", "absent" ] + update_password: + required: false + default: always + choices: ['always', 'on_create'] + version_added: "2.1" + description: + - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users. + notes: - Requires the pymongo Python package on the remote host, version 2.4.2+. This can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html @@ -196,6 +204,7 @@ def main(): ssl=dict(default=False), roles=dict(default=None, type='list'), state=dict(default='present', choices=['absent', 'present']), + update_password=dict(default="always", choices=["always", "on_create"]), ) ) @@ -213,6 +222,7 @@ def main(): ssl = module.params['ssl'] roles = module.params['roles'] state = module.params['state'] + update_password = module.params['update_password'] try: if replica_set: @@ -239,8 +249,11 @@ def main(): module.fail_json(msg='unable to connect to database: %s' % str(e)) if state == 'present': - if password is None: - module.fail_json(msg='password parameter required when adding a user') + if password is None and update_password == 'always': + module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create') + + if update_password != 'always' and user_find(client, user): + password = None try: user_add(module, client, db_name, user, password, roles) From 14bb55e67e92593ce3498f1cbd016847c2344ad0 Mon Sep 17 00:00:00 2001 From: rhorer Date: Fri, 10 Apr 2015 15:27:34 -0500 Subject: [PATCH 121/720] Update twilio.py module name in Examples --- notification/twilio.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/notification/twilio.py b/notification/twilio.py index 8969c28aa50..e50879cd62d 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -57,14 +57,14 @@ author: Matt Makai EXAMPLES = ''' # send a text message from the local server about the build status to (555) 303 5681 # note: you have to have purchased the 'from_number' on your Twilio account -- local_action: text msg="All servers with webserver role are now configured." +- local_action: twilio msg="All servers with webserver role are now configured." account_sid={{ twilio_account_sid }} auth_token={{ twilio_auth_token }} from_number=+15552014545 to_number=+15553035681 # send a text message from a server to (555) 111 3232 # note: you have to have purchased the 'from_number' on your Twilio account -- text: msg="This server's configuration is now complete." +- twilio: msg="This server's configuration is now complete." 
account_sid={{ twilio_account_sid }} auth_token={{ twilio_auth_token }} from_number=+15553258899 to_number=+15551113232 From eaa136cb083697824e71e4dd0401f1dee9091cc2 Mon Sep 17 00:00:00 2001 From: Jeferson Daniel Date: Sat, 11 Apr 2015 16:00:43 -0300 Subject: [PATCH 122/720] Fixes #335 --- packaging/language/bower.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/bower.py b/packaging/language/bower.py index 3fccf51056b..085f454e639 100644 --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -108,7 +108,7 @@ class Bower(object): return '' def list(self): - cmd = ['list', '--json'] + cmd = ['list', '--json', '--config.interactive=false', '--allow-root'] installed = list() missing = list() From e19b53532b4750002dfb52aa930b77e378cf2f68 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 4 Apr 2015 00:03:24 +0200 Subject: [PATCH 123/720] cloudstack: add new module cs_affinitygroup This module depends on ansible.module_utils.cloudstack. --- cloud/cloudstack/cs_affinitygroup.py | 230 +++++++++++++++++++++++++++ 1 file changed, 230 insertions(+) create mode 100644 cloud/cloudstack/cs_affinitygroup.py diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py new file mode 100644 index 00000000000..59c21ee46f6 --- /dev/null +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -0,0 +1,230 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_affinitygroup +short_description: Manages affinity groups on Apache CloudStack based clouds. +description: Create and remove affinity groups. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of the affinity group. + required: true + affinty_type: + description: + - Type of the affinity group. If not specified, first found affinity type is used. + required: false + default: null + description: + description: + - Description of the affinity group. + required: false + default: null + state: + description: + - State of the affinity group. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +''' + +EXAMPLES = ''' +--- +# Create a affinity group +- local_action: + module: cs_affinitygroup + name: haproxy + affinty_type: host anti-affinity + + +# Remove a affinity group +- local_action: + module: cs_affinitygroup + name: haproxy + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of affinity group. + returned: success + type: string + sample: app +description: + description: Description of affinity group. + returned: success + type: string + sample: application affinity group +affinity_type: + description: Type of affinity group. 
+ returned: success + type: string + sample: host anti-affinity +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.affinity_group = None + + + def get_affinity_group(self): + if not self.affinity_group: + affinity_group_name = self.module.params.get('name') + + affinity_groups = self.cs.listAffinityGroups() + if affinity_groups: + for a in affinity_groups['affinitygroup']: + if a['name'] == affinity_group_name: + self.affinity_group = a + break + return self.affinity_group + + + def get_affinity_type(self): + affinity_type = self.module.params.get('affinty_type') + + affinity_types = self.cs.listAffinityGroupTypes() + if affinity_types: + if not affinity_type: + return affinity_types['affinityGroupType'][0]['type'] + + for a in affinity_types['affinityGroupType']: + if a['type'] == affinity_type: + return a['type'] + self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type) + + + def create_affinity_group(self): + affinity_group = self.get_affinity_group() + if not affinity_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['type'] = self.get_affinity_type() + args['description'] = self.module.params.get('description') + + if not self.module.check_mode: + res = self.cs.createAffinityGroup(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + affinity_group = self._poll_job(res, 'affinitygroup') + + return affinity_group + + + def remove_affinity_group(self): + affinity_group = self.get_affinity_group() + if affinity_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + + if not self.module.check_mode: + res = self.cs.deleteAffinityGroup(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'affinitygroup') + + return affinity_group + + + def get_result(self, affinity_group): + if affinity_group: + if 'name' in affinity_group: + self.result['name'] = affinity_group['name'] + if 'description' in affinity_group: + self.result['description'] = affinity_group['description'] + if 'type' in affinity_group: + self.result['affinity_type'] = affinity_group['type'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + affinty_type = dict(default=None), + description = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_ag = AnsibleCloudStackAffinityGroup(module) + + state = module.params.get('state') + if state in ['absent']: + affinity_group = acs_ag.remove_affinity_group() + else: + affinity_group = acs_ag.create_affinity_group() + 
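+        # get_result() below copies name, description and affinity_type of the
+        # affinity group handled above into the module result dict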
+ result = acs_ag.get_result(affinity_group) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From df23b4d17b0c7de5567b14f5dcfd9d46643b3032 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 31 Mar 2015 21:31:42 +0200 Subject: [PATCH 124/720] cloudstack: add new module cs_securitygroup This module depends on ansible.module_utils.cloudstack. --- cloud/cloudstack/cs_securitygroup.py | 195 +++++++++++++++++++++++++++ 1 file changed, 195 insertions(+) create mode 100644 cloud/cloudstack/cs_securitygroup.py diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py new file mode 100644 index 00000000000..4e2856d5a90 --- /dev/null +++ b/cloud/cloudstack/cs_securitygroup.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +module: cs_securitygroup +short_description: Manages security groups on Apache CloudStack based clouds. +description: Create and remove security groups. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of the security group. + required: true + description: + description: + - Description of the security group. + required: false + default: null + state: + description: + - State of the security group. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + project: + description: + - Name of the project the security group to be created in. + required: false + default: null +''' + +EXAMPLES = ''' +--- +# Create a security group +- local_action: + module: cs_securitygroup + name: default + description: default security group + + +# Remove a security group +- local_action: + module: cs_securitygroup + name: default + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of security group. + returned: success + type: string + sample: app +description: + description: Description of security group. 
+ returned: success + type: string + sample: application security group +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackSecurityGroup(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.security_group = None + + + def get_security_group(self): + if not self.security_group: + sg_name = self.module.params.get('name') + args = {} + args['projectid'] = self.get_project_id() + sgs = self.cs.listSecurityGroups(**args) + if sgs: + for s in sgs['securitygroup']: + if s['name'] == sg_name: + self.security_group = s + break + return self.security_group + + + def create_security_group(self): + security_group = self.get_security_group() + if not security_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['projectid'] = self.get_project_id() + args['description'] = self.module.params.get('description') + + if not self.module.check_mode: + res = self.cs.createSecurityGroup(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + security_group = res['securitygroup'] + + return security_group + + + def remove_security_group(self): + security_group = self.get_security_group() + if security_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['projectid'] = self.get_project_id() + + if not self.module.check_mode: + res = self.cs.deleteSecurityGroup(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + return security_group + + + def get_result(self, security_group): + if security_group: + if 'name' in security_group: + self.result['name'] = security_group['name'] + if 'description' in security_group: + self.result['description'] = security_group['description'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + description = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + project = dict(default=None), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_sg = AnsibleCloudStackSecurityGroup(module) + + state = module.params.get('state') + if state in ['absent']: + sg = acs_sg.remove_security_group() + else: + sg = acs_sg.create_security_group() + + result = acs_sg.get_result(sg) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From b8056e8f6f71ef4270910e57e38e0fc98ad52412 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 3 Apr 2015 22:27:42 +0200 Subject: [PATCH 125/720] cloudstack: add new module cs_securitygroup_rule This module depends on ansible.module_utils.cloudstack. 
--- cloud/cloudstack/cs_securitygroup_rule.py | 437 ++++++++++++++++++++++ 1 file changed, 437 insertions(+) create mode 100644 cloud/cloudstack/cs_securitygroup_rule.py diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py new file mode 100644 index 00000000000..a170230acac --- /dev/null +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -0,0 +1,437 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_securitygroup_rule +short_description: Manages security group rules on Apache CloudStack based clouds. +description: Add and remove security group rules. +version_added: '2.0' +author: René Moser +options: + security_group: + description: + - Name of the security group the rule is related to. The security group must be existing. + required: true + state: + description: + - State of the security group rule. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + protocol: + description: + - Protocol of the security group rule. + required: false + default: 'tcp' + choices: [ 'tcp', 'udp', 'icmp', 'ah', 'esp', 'gre' ] + type: + description: + - Ingress or egress security group rule. + required: false + default: 'ingress' + choices: [ 'ingress', 'egress' ] + cidr: + description: + - CIDR (full notation) to be used for security group rule. + required: false + default: '0.0.0.0/0' + user_security_group + description: + - Security group this rule is based of. + required: false + default: null + start_port + description: + - Start port for this rule. Required if C(protocol=tcp) or C(protocol=udp). + required: false + default: null + aliases: [ 'port' ] + end_port + description: + - End port for this rule. Required if C(protocol=tcp) or C(protocol=udp), but C(start_port) will be used if not set. + required: false + default: null + icmp_type + description: + - Type of the icmp message being sent. Required if C(protocol=icmp). + required: false + default: null + icmp_code + description: + - Error code for this icmp message. Required if C(protocol=icmp). + required: false + default: null + project: + description: + - Name of the project the security group to be created in. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. 
+ required: false + default: true +''' + +EXAMPLES = ''' +--- +# Allow inbound port 80/tcp from 1.2.3.4 added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + port: 80 + cidr: 1.2.3.4/32 + + +# Allow tcp/udp outbound added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + type: egress + start_port: 1 + end_port: 65535 + protocol: '{{ item }}' + with_items: + - tcp + - udp + + +# Allow inbound icmp from 0.0.0.0/0 added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + protocol: icmp + icmp_code: -1 + icmp_type: -1 + + +# Remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + port: 80 + state: absent + + +# Allow inbound port 80/tcp from security group web added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + port: 80 + user_security_group: web +''' + +RETURN = ''' +--- +security_group: + description: security group of the rule. + returned: success + type: string + sample: default +type: + description: type of the rule. + returned: success + type: string + sample: ingress +cidr: + description: CIDR of the rule. + returned: success and cidr is defined + type: string + sample: 0.0.0.0/0 +user_security_group: + description: user security group of the rule. + returned: success and user_security_group is defined + type: string + sample: default +protocol: + description: protocol of the rule. + returned: success + type: string + sample: tcp +start_port: + description: start port of the rule. + returned: success + type: int + sample: 80 +end_port: + description: end port of the rule. 
+ returned: success + type: int + sample: 80 +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + + + def _tcp_udp_match(self, rule, protocol, start_port, end_port): + return protocol in ['tcp', 'udp'] \ + and protocol == rule['protocol'] \ + and start_port == int(rule['startport']) \ + and end_port == int(rule['endport']) + + + def _icmp_match(self, rule, protocol, icmp_code, icmp_type): + return protocol == 'icmp' \ + and protocol == rule['protocol'] \ + and icmp_code == int(rule['icmpcode']) \ + and icmp_type == int(rule['icmptype']) + + + def _ah_esp_gre_match(self, rule, protocol): + return protocol in ['ah', 'esp', 'gre'] \ + and protocol == rule['protocol'] + + + def _type_security_group_match(self, rule, security_group_name): + return security_group_name \ + and 'securitygroupname' in rule \ + and security_group_name == rule['securitygroupname'] + + + def _type_cidr_match(self, rule, cidr): + return 'cidr' in rule \ + and cidr == rule['cidr'] + + + def _get_rule(self, rules): + user_security_group_name = self.module.params.get('user_security_group') + cidr = self.module.params.get('cidr') + protocol = self.module.params.get('protocol') + start_port = self.module.params.get('start_port') + end_port = self.module.params.get('end_port') + icmp_code = self.module.params.get('icmp_code') + icmp_type = self.module.params.get('icmp_type') + + if not end_port: + end_port = start_port + + if protocol in ['tcp', 'udp'] and not (start_port and end_port): + self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol) + + if protocol == 'icmp' and not (icmp_type and icmp_code): + self.module.fail_json(msg="no icmp_type or icmp_code set for protocol '%s'" % protocol) + + for rule in rules: + if user_security_group_name: + type_match = self._type_security_group_match(rule, user_security_group_name) + else: + type_match = self._type_cidr_match(rule, cidr) + + protocol_match = ( self._tcp_udp_match(rule, protocol, start_port, end_port) \ + or self._icmp_match(rule, protocol, icmp_code, icmp_type) \ + or self._ah_esp_gre_match(rule, protocol) + ) + + if type_match and protocol_match: + return rule + return None + + + def get_security_group(self, security_group_name=None): + if not security_group_name: + security_group_name = self.module.params.get('security_group') + args = {} + args['securitygroupname'] = security_group_name + args['projectid'] = self.get_project_id() + sgs = self.cs.listSecurityGroups(**args) + if not sgs or 'securitygroup' not in sgs: + self.module.fail_json(msg="security group '%s' not found" % security_group_name) + return sgs['securitygroup'][0] + + + def add_rule(self): + security_group = self.get_security_group() + + args = {} + user_security_group_name = self.module.params.get('user_security_group') + + # the user_security_group and cidr are mutually_exclusive, but cidr is defaulted to 0.0.0.0/0. + # that is why we ignore if we have a user_security_group. 
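+        # i.e. the cidr default is only passed to the API when no user_security_group is set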
+ if user_security_group_name: + args['usersecuritygrouplist'] = [] + user_security_group = self.get_security_group(user_security_group_name) + args['usersecuritygrouplist'].append({ + 'group': user_security_group['name'], + 'account': user_security_group['account'], + }) + else: + args['cidrlist'] = self.module.params.get('cidr') + + args['protocol'] = self.module.params.get('protocol') + args['startport'] = self.module.params.get('start_port') + args['endport'] = self.module.params.get('end_port') + args['icmptype'] = self.module.params.get('icmp_type') + args['icmpcode'] = self.module.params.get('icmp_code') + args['projectid'] = self.get_project_id() + args['securitygroupid'] = security_group['id'] + + if not args['endport']: + args['endport'] = args['startport'] + + rule = None + res = None + type = self.module.params.get('type') + if type == 'ingress': + rule = self._get_rule(security_group['ingressrule']) + if not rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.authorizeSecurityGroupIngress(**args) + + elif type == 'egress': + rule = self._get_rule(security_group['egressrule']) + if not rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.authorizeSecurityGroupEgress(**args) + + if res and 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + security_group = self._poll_job(res, 'securitygroup') + return security_group + + + def remove_rule(self): + security_group = self.get_security_group() + rule = None + res = None + type = self.module.params.get('type') + if type == 'ingress': + rule = self._get_rule(security_group['ingressrule']) + if rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.revokeSecurityGroupIngress(id=rule['ruleid']) + + elif type == 'egress': + rule = self._get_rule(security_group['egressrule']) + if rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.revokeSecurityGroupEgress(id=rule['ruleid']) + + if res and 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'securitygroup') + return security_group + + + def get_result(self, security_group_rule): + type = self.module.params.get('type') + + key = 'ingressrule' + if type == 'egress': + key = 'egressrule' + + self.result['type'] = type + self.result['security_group'] = self.module.params.get('security_group') + + if key in security_group_rule and security_group_rule[key]: + if 'securitygroupname' in security_group_rule[key][0]: + self.result['user_security_group'] = security_group_rule[key][0]['securitygroupname'] + if 'cidr' in security_group_rule[key][0]: + self.result['cidr'] = security_group_rule[key][0]['cidr'] + if 'protocol' in security_group_rule[key][0]: + self.result['protocol'] = security_group_rule[key][0]['protocol'] + if 'startport' in security_group_rule[key][0]: + self.result['start_port'] = security_group_rule[key][0]['startport'] + if 'endport' in security_group_rule[key][0]: + self.result['end_port'] = security_group_rule[key][0]['endport'] + if 'icmpcode' in security_group_rule[key][0]: + self.result['icmp_code'] = security_group_rule[key][0]['icmpcode'] + if 'icmptype' in security_group_rule[key][0]: + self.result['icmp_type'] = security_group_rule[key][0]['icmptype'] + return self.result + + +def main(): + 
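+    # build the argument spec; icmp_type/icmp_code and the port options are
+    # declared mutually exclusive below, while cidr vs. user_security_group is
+    # resolved later in add_rule()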
module = AnsibleModule( + argument_spec = dict( + security_group = dict(required=True), + type = dict(choices=['ingress', 'egress'], default='ingress'), + cidr = dict(default='0.0.0.0/0'), + user_security_group = dict(default=None), + protocol = dict(choices=['tcp', 'udp', 'icmp', 'ah', 'esp', 'gre'], default='tcp'), + icmp_type = dict(type='int', default=None), + icmp_code = dict(type='int', default=None), + start_port = dict(type='int', default=None, aliases=['port']), + end_port = dict(type='int', default=None), + state = dict(choices=['present', 'absent'], default='present'), + project = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + mutually_exclusive = ( + ['icmp_type', 'start_port'], + ['icmp_type', 'end_port'], + ['icmp_code', 'start_port'], + ['icmp_code', 'end_port'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_sg_rule = AnsibleCloudStackSecurityGroupRule(module) + + state = module.params.get('state') + if state in ['absent']: + sg_rule = acs_sg_rule.remove_rule() + else: + sg_rule = acs_sg_rule.add_rule() + + result = acs_sg_rule.get_result(sg_rule) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 39d30168a59bd1ca296abd338f422e212b62e77f Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 12 Apr 2015 12:59:03 +0200 Subject: [PATCH 126/720] cloudstack: rename modules to more meaningful name schema --- cloud/cloudstack/{cloudstack_fw.py => cs_firewall.py} | 8 ++++---- cloud/cloudstack/{cloudstack_iso.py => cs_iso.py} | 10 +++++----- .../{cloudstack_sshkey.py => cs_sshkeypair.py} | 8 ++++---- 3 files changed, 13 insertions(+), 13 deletions(-) rename cloud/cloudstack/{cloudstack_fw.py => cs_firewall.py} (98%) rename cloud/cloudstack/{cloudstack_iso.py => cs_iso.py} (98%) rename cloud/cloudstack/{cloudstack_sshkey.py => cs_sshkeypair.py} (96%) diff --git a/cloud/cloudstack/cloudstack_fw.py b/cloud/cloudstack/cs_firewall.py similarity index 98% rename from cloud/cloudstack/cloudstack_fw.py rename to cloud/cloudstack/cs_firewall.py index cb60c1cde64..9049f40f7c4 100644 --- a/cloud/cloudstack/cloudstack_fw.py +++ b/cloud/cloudstack/cs_firewall.py @@ -19,7 +19,7 @@ # along with Ansible. If not, see . DOCUMENTATION = ''' -module: cloudstack_fw +module: cs_firewall short_description: Manages firewall rules on Apache CloudStack based clouds. description: Creates and removes firewall rules. 
version_added: '2.0' @@ -77,7 +77,7 @@ EXAMPLES = ''' --- # Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1 - local_action: - module: cloudstack_fw + module: cs_firewall ip_address: 4.3.2.1 start_port: 80 end_port: 80 @@ -86,7 +86,7 @@ EXAMPLES = ''' # Allow inbound tcp/udp port 53 to 4.3.2.1 - local_action: - module: cloudstack_fw + module: cs_firewall ip_address: 4.3.2.1 start_port: 53 end_port: 53 @@ -98,7 +98,7 @@ EXAMPLES = ''' # Ensure firewall rule is removed - local_action: - module: cloudstack_fw + module: cs_firewall ip_address: 4.3.2.1 start_port: 8000 end_port: 8888 diff --git a/cloud/cloudstack/cloudstack_iso.py b/cloud/cloudstack/cs_iso.py similarity index 98% rename from cloud/cloudstack/cloudstack_iso.py rename to cloud/cloudstack/cs_iso.py index bd90c427ea4..42f00fb1f00 100644 --- a/cloud/cloudstack/cloudstack_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -module: cloudstack_iso +module: cs_iso short_description: Manages ISOs images on Apache CloudStack based clouds. description: Register and remove ISO images. version_added: '2.0' @@ -100,7 +100,7 @@ EXAMPLES = ''' --- # Register an ISO if ISO name does not already exist. - local_action: - module: cloudstack_iso + module: cs_iso name: Debian 7 64-bit url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso os_type: Debian GNU/Linux 7(64-bit) @@ -108,7 +108,7 @@ EXAMPLES = ''' # Register an ISO with given name if ISO md5 checksum does not already exist. - local_action: - module: cloudstack_iso + module: cs_iso name: Debian 7 64-bit url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso os_type: @@ -117,14 +117,14 @@ EXAMPLES = ''' # Remove an ISO by name - local_action: - module: cloudstack_iso + module: cs_iso name: Debian 7 64-bit state: absent # Remove an ISO by checksum - local_action: - module: cloudstack_iso + module: cs_iso name: Debian 7 64-bit checksum: 0b31bccccb048d20b551f70830bb7ad0 state: absent diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cs_sshkeypair.py similarity index 96% rename from cloud/cloudstack/cloudstack_sshkey.py rename to cloud/cloudstack/cs_sshkeypair.py index 657e367fefe..9cc514c05ea 100644 --- a/cloud/cloudstack/cloudstack_sshkey.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -module: cloudstack_sshkey +module: cs_sshkeypair short_description: Manages SSH keys on Apache CloudStack based clouds. 
description: - If no key was found and no public key was provided and a new SSH @@ -53,15 +53,15 @@ options: EXAMPLES = ''' --- # create a new private / public key pair: -- local_action: cloudstack_sshkey name=linus@example.com +- local_action: cs_sshkeypair name=linus@example.com register: key - debug: msg='private key is {{ key.private_key }}' # remove a public key by its name: -- local_action: cloudstack_sshkey name=linus@example.com state=absent +- local_action: cs_sshkeypair name=linus@example.com state=absent # register your existing local public key: -- local_action: cloudstack_sshkey name=linus@example.com public_key='{{ lookup('file', '~/.ssh/id_rsa.pub') }}' +- local_action: cs_sshkeypair name=linus@example.com public_key='{{ lookup('file', '~/.ssh/id_rsa.pub') }}' ''' RETURN = ''' From c899e2d9a7fe000442363112d308af59f26ed054 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 4 Apr 2015 01:30:17 +0200 Subject: [PATCH 127/720] cloudstack: add new module cs_vmsnapshot This module depends on ansible.module_utils.cloudstack. --- cloud/cloudstack/cs_vmsnapshot.py | 278 ++++++++++++++++++++++++++++++ 1 file changed, 278 insertions(+) create mode 100644 cloud/cloudstack/cs_vmsnapshot.py diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py new file mode 100644 index 00000000000..d53a33ac72e --- /dev/null +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -0,0 +1,278 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_vmsnapshot +short_description: Manages VM snapshots on Apache CloudStack based clouds. +description: Create, remove and revert VM from snapshots. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Unique Name of the snapshot. In CloudStack terms C(displayname). + required: true + aliases: ['displayname'] + description: + description: + - Description of the snapshot. + required: false + default: null + snapshot_memory: + description: + - Snapshot memory if set to true. + required: false + default: false + project: + description: + - Name of the project the VM is assigned to. + required: false + default: null + state: + description: + - State of the snapshot. + required: false + default: 'present' + choices: [ 'present', 'absent', 'revert' ] + poll_async: + description: + - Poll async jobs until job has finished. 
+ required: false + default: true +''' + +EXAMPLES = ''' +--- +# Create a VM snapshot of disk and memory before an upgrade +- local_action: + module: cs_vmsnapshot + name: Snapshot before upgrade + vm: web-01 + snapshot_memory: yes + + +# Revert a VM to a snapshot after a failed upgrade +- local_action: + module: cs_vmsnapshot + name: Snapshot before upgrade + vm: web-01 + state: revert + + +# Remove a VM snapshot after successful upgrade +- local_action: + module: cs_vmsnapshot + name: Snapshot before upgrade + vm: web-01 + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the snapshot. + returned: success + type: string + sample: snapshot before update +displayname: + description: displayname of the snapshot. + returned: success + type: string + sample: snapshot before update +created: + description: date of the snapshot. + returned: success + type: string + sample: 2015-03-29T14:57:06+0200 +current: + description: true if snapshot is current + returned: success + type: boolean + sample: True +state: + description: state of the vm snapshot + returned: success + type: string + sample: Allocated +type: + description: type of vm snapshot + returned: success + type: string + sample: DiskAndMemory +description: + description: + description: description of vm snapshot + returned: success + type: string + sample: snapshot brought to you by Ansible +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackVmSnapshot(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + + + def get_snapshot(self): + args = {} + args['virtualmachineid'] = self.get_vm_id() + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + + snapshots = self.cs.listVMSnapshot(**args) + if snapshots: + return snapshots['vmSnapshot'][0] + return None + + + def create_snapshot(self): + snapshot = self.get_snapshot() + if not snapshot: + self.result['changed'] = True + + args = {} + args['virtualmachineid'] = self.get_vm_id() + args['name'] = self.module.params.get('name') + args['description'] = self.module.params.get('description') + args['snapshotmemory'] = self.module.params.get('snapshot_memory') + + if not self.module.check_mode: + res = self.cs.createVMSnapshot(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + snapshot = self._poll_job(res, 'vmsnapshot') + + return snapshot + + + def remove_snapshot(self): + snapshot = self.get_snapshot() + if snapshot: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.deleteVMSnapshot(vmsnapshotid=snapshot['id']) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'vmsnapshot') + return snapshot + + + def revert_vm_to_snapshot(self): + snapshot = self.get_snapshot() + if snapshot: + self.result['changed'] = True + + if snapshot['state'] != "Ready": + self.module.fail_json(msg="snapshot state is '%s', not ready, could not revert VM" % snapshot['state']) + + if not self.module.check_mode: + res = self.cs.revertToVMSnapshot(vmsnapshotid=snapshot['id']) + + poll_async = 
self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'vmsnapshot') + return snapshot + + self.module.fail_json(msg="snapshot not found, could not revert VM") + + + def get_result(self, snapshot): + if snapshot: + if 'displayname' in snapshot: + self.result['displayname'] = snapshot['displayname'] + if 'created' in snapshot: + self.result['created'] = snapshot['created'] + if 'current' in snapshot: + self.result['current'] = snapshot['current'] + if 'state' in snapshot: + self.result['state'] = snapshot['state'] + if 'type' in snapshot: + self.result['type'] = snapshot['type'] + if 'name' in snapshot: + self.result['name'] = snapshot['name'] + if 'description' in snapshot: + self.result['description'] = snapshot['description'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, aliases=['displayname']), + vm = dict(required=True), + description = dict(default=None), + project = dict(default=None), + snapshot_memory = dict(choices=BOOLEANS, default=False), + state = dict(choices=['present', 'absent', 'revert'], default='present'), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module) + + state = module.params.get('state') + if state in ['revert']: + snapshot = acs_vmsnapshot.revert_vm_to_snapshot() + elif state in ['absent']: + snapshot = acs_vmsnapshot.remove_snapshot() + else: + snapshot = acs_vmsnapshot.create_snapshot() + + result = acs_vmsnapshot.get_result(snapshot) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 757a047a793cdb337a3465517dfbd97774fdcd87 Mon Sep 17 00:00:00 2001 From: Matthew Makai Date: Mon, 13 Apr 2015 10:04:10 -0400 Subject: [PATCH 128/720] adding sendgrid module to extras --- notification/sendgrid.py | 143 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 notification/sendgrid.py diff --git a/notification/sendgrid.py b/notification/sendgrid.py new file mode 100644 index 00000000000..6c5264521c2 --- /dev/null +++ b/notification/sendgrid.py @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Matt Makai +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +version_added: "2.0" +module: sendgrid +short_description: Sends an email with the SendGrid API +description: + - Sends an email with a SendGrid account through their API, not through + the SMTP service. 
+notes: + - Like the other notification modules, this one requires an external + dependency to work. In this case, you'll need an active SendGrid + account. +options: + username: + description: + username for logging into the SendGrid account + required: true + password: + description: password that corresponds to the username + required: true + from_address: + description: + the address in the "from" field for the email + required: true + to_addresses: + description: + a list with one or more recipient email addresses + required: true + subject: + description: + the desired subject for the email + required: true + +requirements: [ urllib, urllib2 ] +author: Matt Makai +''' + +EXAMPLES = ''' +# send an email to a single recipient that the deployment was successful +- local_action: sendgrid + username={{ sendgrid_username }} + password={{ sendgrid_password }} + from_address="ansible@mycompany.com" + to_addresses: + - "ops@mycompany.com" + subject="Deployment success." + body="The most recent Ansible deployment was successful." + +# send an email to more than one recipient that the build failed +- local_action: sendgrid + username={{ sendgrid_username }} + password={{ sendgrid_password }} + from_address="build@mycompany.com" + to_addresses: + - "ops@mycompany.com" + - "devteam@mycompany.com" + subject="Build failure!." + body="Unable to pull source repository from Git server." +''' + +# ======================================= +# sendgrid module support methods +# +try: + import urllib, urllib2 +except ImportError: + module.fail_json(msg="urllib and urllib2 are required") + +import base64 + +def post_sendgrid_api(module, username, password, from_address, to_addresses, + subject, body): + SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" + AGENT = "Ansible/1.7" + data = {'api_user':username, 'api_key':password, + 'from':from_address, 'subject': subject, 'text': body} + encoded_data = urllib.urlencode(data) + to_addresses_api = '' + for recipient in to_addresses: + to_addresses_api += '&to[]=%s' % str(recipient) + encoded_data += to_addresses_api + request = urllib2.Request(SENDGRID_URI) + request.add_header('User-Agent', AGENT) + request.add_header('Content-type', 'application/x-www-form-urlencoded') + request.add_header('Accept', 'application/json') + return urllib2.urlopen(request, encoded_data) + + +# ======================================= +# Main +# + +def main(): + module = AnsibleModule( + argument_spec=dict( + username=dict(required=True), + password=dict(required=True, no_log=True), + from_address=dict(required=True), + to_addresses=dict(required=True, type='list'), + subject=dict(required=True), + body=dict(required=True), + ), + supports_check_mode=True + ) + + username = module.params['username'] + password = module.params['password'] + from_address = module.params['from_address'] + to_addresses = module.params['to_addresses'] + subject = module.params['subject'] + body = module.params['body'] + + try: + response = post_sendgrid_api(module, username, password, + from_address, to_addresses, subject, body) + except Exception, e: + module.fail_json(msg="unable to send email through SendGrid API") + + module.exit_json(msg=subject, changed=False) + +# import module snippets +from ansible.module_utils.basic import * +main() From 2f2a69ad8832446f2992286c8ab5822c480c695d Mon Sep 17 00:00:00 2001 From: Matthew Makai Date: Mon, 13 Apr 2015 10:25:24 -0400 Subject: [PATCH 129/720] updating sendgrid module based on code review by @abadger --- notification/sendgrid.py | 47 
++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/notification/sendgrid.py b/notification/sendgrid.py index 6c5264521c2..d8bfb7d6a2e 100644 --- a/notification/sendgrid.py +++ b/notification/sendgrid.py @@ -27,8 +27,10 @@ description: - Sends an email with a SendGrid account through their API, not through the SMTP service. notes: - - Like the other notification modules, this one requires an external - dependency to work. In this case, you'll need an active SendGrid + - This module is non-idempotent because it sends an email through the + external API. It is idempotent only in the case that the module fails. + - Like the other notification modules, this one requires an external + dependency to work. In this case, you'll need an active SendGrid account. options: username: @@ -51,31 +53,32 @@ options: the desired subject for the email required: true -requirements: [ urllib, urllib2 ] author: Matt Makai ''' EXAMPLES = ''' # send an email to a single recipient that the deployment was successful -- local_action: sendgrid - username={{ sendgrid_username }} - password={{ sendgrid_password }} - from_address="ansible@mycompany.com" - to_addresses: - - "ops@mycompany.com" - subject="Deployment success." - body="The most recent Ansible deployment was successful." +- sendgrid: + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "ansible@mycompany.com" + to_addresses: + - "ops@mycompany.com" + subject: "Deployment success." + body: "The most recent Ansible deployment was successful." + delegate_to: localhost # send an email to more than one recipient that the build failed -- local_action: sendgrid - username={{ sendgrid_username }} - password={{ sendgrid_password }} - from_address="build@mycompany.com" +- sendgrid + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "build@mycompany.com" to_addresses: - "ops@mycompany.com" - "devteam@mycompany.com" - subject="Build failure!." - body="Unable to pull source repository from Git server." + subject: "Build failure!." + body: "Unable to pull source repository from Git server." 
+ delegate_to: localhost ''' # ======================================= @@ -91,13 +94,15 @@ import base64 def post_sendgrid_api(module, username, password, from_address, to_addresses, subject, body): SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" - AGENT = "Ansible/1.7" - data = {'api_user':username, 'api_key':password, + AGENT = "Ansible" + data = {'api_user': username, 'api_key':password, 'from':from_address, 'subject': subject, 'text': body} encoded_data = urllib.urlencode(data) to_addresses_api = '' for recipient in to_addresses: - to_addresses_api += '&to[]=%s' % str(recipient) + if isinstance(recipient, unicode): + recipient = recipient.encode('utf-8') + to_addresses_api += '&to[]=%s' % recipient encoded_data += to_addresses_api request = urllib2.Request(SENDGRID_URI) request.add_header('User-Agent', AGENT) @@ -133,7 +138,7 @@ def main(): try: response = post_sendgrid_api(module, username, password, from_address, to_addresses, subject, body) - except Exception, e: + except Exception: module.fail_json(msg="unable to send email through SendGrid API") module.exit_json(msg=subject, changed=False) From 3e1ffd12c725080979c6e7625c6119cd378033f2 Mon Sep 17 00:00:00 2001 From: Matthew Makai Date: Mon, 13 Apr 2015 11:22:09 -0400 Subject: [PATCH 130/720] updating twilio module docs and making it possible to send to a list of phone numbers --- notification/twilio.py | 82 +++++++++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 32 deletions(-) diff --git a/notification/twilio.py b/notification/twilio.py index e50879cd62d..a95f21bde1f 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2014, Matt Makai +# (c) 2015, Matt Makai # # This file is part of Ansible # @@ -24,18 +24,20 @@ version_added: "1.6" module: twilio short_description: Sends a text message to a mobile phone through Twilio. description: - - Sends a text message to a phone number through an the Twilio SMS service. + - Sends a text message to a phone number through the Twilio SMS API. notes: - - Like the other notification modules, this one requires an external + - This module is non-idempotent because it sends an email through the + external API. It is idempotent only in the case that the module fails. + - Like the other notification modules, this one requires an external dependency to work. In this case, you'll need a Twilio account with a purchased or verified phone number to send the text message. 
options: account_sid: description: - user's account id for Twilio found on the account page + user's Twilio account token found on the account page required: true auth_token: - description: user's authentication token for Twilio found on the account page + description: user's Twilio authentication token required: true msg: description: @@ -43,36 +45,45 @@ options: required: true to_number: description: - what phone number to send the text message to, format +15551112222 + one or more phone numbers to send the text message to, + format +15551112222 required: true from_number: description: - what phone number to send the text message from, format +15551112222 + the Twilio number to send the text message from, format +15551112222 required: true - -requirements: [ urllib, urllib2 ] + author: Matt Makai ''' EXAMPLES = ''' -# send a text message from the local server about the build status to (555) 303 5681 -# note: you have to have purchased the 'from_number' on your Twilio account -- local_action: twilio msg="All servers with webserver role are now configured." - account_sid={{ twilio_account_sid }} - auth_token={{ twilio_auth_token }} - from_number=+15552014545 to_number=+15553035681 - -# send a text message from a server to (555) 111 3232 -# note: you have to have purchased the 'from_number' on your Twilio account -- twilio: msg="This server's configuration is now complete." - account_sid={{ twilio_account_sid }} - auth_token={{ twilio_auth_token }} - from_number=+15553258899 to_number=+15551113232 - +# send an SMS about the build status to (555) 303 5681 +# note: you have to have the 'from_number' on your Twilio account +- twilio: + msg: "All servers with webserver role are now configured." + account_sid: "{{ twilio_account_sid }}" + auth_token: "{{ twilio_auth_token }}" + from_number: "+15552014545" + to_number: "+15553035681" + delegate_to: localhost + +# send an SMS to multiple phone numbers about the deployment +# note: you must have the 'from_number' on your Twilio account +- twilio: + msg: "This server's configuration is now complete." 
+ account_sid: "{{ twilio_account_sid }}" + auth_token: "{{ twilio_auth_token }}" + from_number: "+15553258899" + to_number: + - "+15551113232" + - "+12025551235" + - "+19735559010" + delegate_to: localhost + ''' # ======================================= -# text module support methods +# twilio module support methods # try: import urllib, urllib2 @@ -82,10 +93,11 @@ except ImportError: import base64 -def post_text(module, account_sid, auth_token, msg, from_number, to_number): +def post_twilio_api(module, account_sid, auth_token, msg, from_number, + to_number): URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ % (account_sid,) - AGENT = "Ansible/1.5" + AGENT = "Ansible" data = {'From':from_number, 'To':to_number, 'Body':msg} encoded_data = urllib.urlencode(data) @@ -94,7 +106,7 @@ def post_text(module, account_sid, auth_token, msg, from_number, to_number): (account_sid, auth_token)).replace('\n', '') request.add_header('User-Agent', AGENT) request.add_header('Content-type', 'application/x-www-form-urlencoded') - request.add_header('Accept', 'application/ansible') + request.add_header('Accept', 'application/json') request.add_header('Authorization', 'Basic %s' % base64string) return urllib2.urlopen(request, encoded_data) @@ -115,7 +127,7 @@ def main(): ), supports_check_mode=True ) - + account_sid = module.params['account_sid'] auth_token = module.params['auth_token'] msg = module.params['msg'] @@ -123,12 +135,18 @@ def main(): to_number = module.params['to_number'] try: - response = post_text(module, account_sid, auth_token, msg, - from_number, to_number) - except Exception, e: + if isinstance(to_number, list): + for number in to_number: + post_twilio_api(module, account_sid, auth_token, msg, + from_number, number) + else: + post_twilio_api(module, account_sid, auth_token, msg, + from_number, to_number) + pass + except Exception: module.fail_json(msg="unable to send text message to %s" % to_number) - module.exit_json(msg=msg, changed=False) + module.exit_json(msg=msg, changed=False) # import module snippets from ansible.module_utils.basic import * From 9f4ad0246946c4e21aaf2547f7d311de99879720 Mon Sep 17 00:00:00 2001 From: Matthew Makai Date: Mon, 13 Apr 2015 11:42:50 -0400 Subject: [PATCH 131/720] updating twilio module to optionally support MMS --- notification/twilio.py | 43 ++++++++++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/notification/twilio.py b/notification/twilio.py index a95f21bde1f..00bde6cc8f2 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -24,7 +24,7 @@ version_added: "1.6" module: twilio short_description: Sends a text message to a mobile phone through Twilio. description: - - Sends a text message to a phone number through the Twilio SMS API. + - Sends a text message to a phone number through the Twilio messaging API. notes: - This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that the module fails. 
@@ -52,27 +52,34 @@ options: description: the Twilio number to send the text message from, format +15551112222 required: true + media_url: + description: + a URL with a picture, video or sound clip to send with an MMS + (multimedia message) instead of a plain SMS + required: false author: Matt Makai ''' EXAMPLES = ''' # send an SMS about the build status to (555) 303 5681 -# note: you have to have the 'from_number' on your Twilio account +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account - twilio: msg: "All servers with webserver role are now configured." - account_sid: "{{ twilio_account_sid }}" - auth_token: "{{ twilio_auth_token }}" + account_sid: "ACXXXXXXXXXXXXXXXXX" + auth_token: "ACXXXXXXXXXXXXXXXXX" from_number: "+15552014545" to_number: "+15553035681" delegate_to: localhost # send an SMS to multiple phone numbers about the deployment -# note: you must have the 'from_number' on your Twilio account +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account - twilio: msg: "This server's configuration is now complete." - account_sid: "{{ twilio_account_sid }}" - auth_token: "{{ twilio_auth_token }}" + account_sid: "ACXXXXXXXXXXXXXXXXX" + auth_token: "ACXXXXXXXXXXXXXXXXX" from_number: "+15553258899" to_number: - "+15551113232" @@ -80,6 +87,18 @@ EXAMPLES = ''' - "+19735559010" delegate_to: localhost +# send an MMS to multiple phone numbers with an update on the +# deployment and a screenshot of the results +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account +- twilio: + msg: "Deployment complete!" + account_sid: "ACXXXXXXXXXXXXXXXXX" + auth_token: "ACXXXXXXXXXXXXXXXXX" + from_number: "+15552014545" + to_number: "+15553035681" + media_url: "https://demo.twilio.com/logo.png" + delegate_to: localhost ''' # ======================================= @@ -94,12 +113,14 @@ import base64 def post_twilio_api(module, account_sid, auth_token, msg, from_number, - to_number): + to_number, media_url=None): URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ % (account_sid,) AGENT = "Ansible" data = {'From':from_number, 'To':to_number, 'Body':msg} + if media_url: + data['MediaUrl'] = media_url encoded_data = urllib.urlencode(data) request = urllib2.Request(URI) base64string = base64.encodestring('%s:%s' % \ @@ -124,6 +145,7 @@ def main(): msg=dict(required=True), from_number=dict(required=True), to_number=dict(required=True), + media_url=dict(default=None, required=False), ), supports_check_mode=True ) @@ -133,15 +155,16 @@ def main(): msg = module.params['msg'] from_number = module.params['from_number'] to_number = module.params['to_number'] + media_url = module.params['media_url'] try: if isinstance(to_number, list): for number in to_number: post_twilio_api(module, account_sid, auth_token, msg, - from_number, number) + from_number, number, media_url) else: post_twilio_api(module, account_sid, auth_token, msg, - from_number, to_number) + from_number, to_number, media_url) pass except Exception: module.fail_json(msg="unable to send text message to %s" % to_number) From c72be32b94b6fd2a19acad113c6d2339d0b97169 Mon Sep 17 00:00:00 2001 From: Matthew Makai Date: Mon, 13 Apr 2015 11:55:20 -0400 Subject: [PATCH 132/720] fixing typo in documentation --- notification/twilio.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/notification/twilio.py b/notification/twilio.py index 00bde6cc8f2..a45dc022988 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -87,8 +87,8 @@ EXAMPLES = ''' - "+19735559010" delegate_to: localhost -# send an MMS to multiple phone numbers with an update on the -# deployment and a screenshot of the results +# send an MMS to a single recipient with an update on the deployment +# and an image of the results # note: replace account_sid and auth_token values with your credentials # and you have to have the 'from_number' on your Twilio account - twilio: From 6522e4e016c958b09c2b0a21db671ac670f8f942 Mon Sep 17 00:00:00 2001 From: Matthew Makai Date: Mon, 13 Apr 2015 12:06:27 -0400 Subject: [PATCH 133/720] updating main for loop based on @abadger code review --- notification/twilio.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/notification/twilio.py b/notification/twilio.py index a45dc022988..faae7b6f58f 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -157,17 +157,15 @@ def main(): to_number = module.params['to_number'] media_url = module.params['media_url'] - try: - if isinstance(to_number, list): - for number in to_number: - post_twilio_api(module, account_sid, auth_token, msg, - from_number, number, media_url) - else: + if not isinstance(to_number, list): + to_number = [to_number] + + for number in to_number: + try: post_twilio_api(module, account_sid, auth_token, msg, - from_number, to_number, media_url) - pass - except Exception: - module.fail_json(msg="unable to send text message to %s" % to_number) + from_number, number, media_url) + except Exception: + module.fail_json(msg="unable to send message to %s" % number) module.exit_json(msg=msg, changed=False) From 582da5b911212b37fee1272696daeb0b1c1cae97 Mon Sep 17 00:00:00 2001 From: RJ Nowling Date: Tue, 14 Apr 2015 12:02:32 -0500 Subject: [PATCH 134/720] Make 'module' global. Small whitespace formatting fixes. Closes Issue #397. 
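Of the 'global module' declarations this commit adds, the one in main() is the functional fix, since that is where the name is assigned; the ones in the read-only helpers are essentially documentation, because reading a module-level name never requires the keyword. A stripped-down illustration of the pattern (plain Python, not the gluster module itself):

    module = None

    def helper():
        # reads the module-level name at call time
        return module.upper()

    def main():
        global module          # without this, the assignment below would bind a new local
        module = 'configured'
        print helper()         # -> CONFIGURED

    if __name__ == '__main__':
        main()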
--- system/gluster_volume.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index e04df48d5f4..c1607f627c3 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -134,6 +134,7 @@ glusterbin = '' def run_gluster(gargs, **kwargs): global glusterbin + global module args = [glusterbin] args.extend(gargs) try: @@ -146,6 +147,7 @@ def run_gluster(gargs, **kwargs): def run_gluster_nofail(gargs, **kwargs): global glusterbin + global module args = [glusterbin] args.extend(gargs) rc, out, err = module.run_command(args, **kwargs) @@ -155,6 +157,7 @@ def run_gluster_nofail(gargs, **kwargs): def run_gluster_yes(gargs): global glusterbin + global module args = [glusterbin] args.extend(gargs) rc, out, err = module.run_command(args, data='y\n') @@ -240,6 +243,7 @@ def wait_for_peer(host): return False def probe(host): + global module run_gluster([ 'peer', 'probe', host ]) if not wait_for_peer(host): module.fail_json(msg='failed to probe peer %s' % host) @@ -285,18 +289,19 @@ def add_brick(name, brick, force): run_gluster(args) def do_rebalance(name): - run_gluster(['volume', 'rebalance', name, 'start']) + run_gluster([ 'volume', 'rebalance', name, 'start' ]) def enable_quota(name): run_gluster([ 'volume', 'quota', name, 'enable' ]) def set_quota(name, directory, value): - run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ]) + run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ]) def main(): ### MAIN ### + global module module = AnsibleModule( argument_spec=dict( name=dict(required=True, default=None, aliases=['volume']), From 658e7300ad966c16c1440da498d945c7d15539c8 Mon Sep 17 00:00:00 2001 From: Benjamin Albrecht Date: Tue, 14 Apr 2015 20:56:36 +0200 Subject: [PATCH 135/720] Fix possible values for zfs sync property --- system/zfs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/zfs.py b/system/zfs.py index 93248897051..1ac14361e09 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -177,7 +177,7 @@ options: description: - The sync property. required: False - choices: ['on','off'] + choices: ['standard','always','disabled'] utf8only: description: - The utf8only property. 
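The corrected choices mirror what the ZFS sync property actually accepts; 'on' and 'off' have never been valid values for it. Roughly speaking, the option ends up as a plain zfs set/get round trip, as in this sketch (assumes a host with ZFS installed and uses the placeholder dataset tank/home):

    import subprocess

    dataset = 'tank/home'   # placeholder dataset name
    subprocess.check_call(['zfs', 'set', 'sync=standard', dataset])
    value = subprocess.check_output(
        ['zfs', 'get', '-H', '-o', 'value', 'sync', dataset]).strip()
    print value             # -> standard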
@@ -368,7 +368,7 @@ def main(): 'sharenfs': {'required': False}, 'sharesmb': {'required': False}, 'snapdir': {'required': False, 'choices':['hidden', 'visible']}, - 'sync': {'required': False, 'choices':['on', 'off']}, + 'sync': {'required': False, 'choices':['standard', 'always', 'disabled']}, # Not supported #'userquota': {'required': False}, 'utf8only': {'required': False, 'choices':['on', 'off']}, From 9ba0f9f57280911cc49e53ee948e972776f9fc2e Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 18:48:36 -0400 Subject: [PATCH 136/720] Expand user in path, fix #385 --- files/patch.py | 1 + 1 file changed, 1 insertion(+) diff --git a/files/patch.py b/files/patch.py index 2f2894a6508..ec3a3b02c00 100755 --- a/files/patch.py +++ b/files/patch.py @@ -130,6 +130,7 @@ def main(): # Create type object as namespace for module params p = type('Params', (), module.params) + p.src = os.path.expanduser(p.src) if not os.access(p.src, R_OK): module.fail_json(msg="src %s doesn't exist or not readable" % (p.src)) From 14cf26f520a1d0b053024343158bb563f446a08e Mon Sep 17 00:00:00 2001 From: Balazs Pocze Date: Tue, 14 Apr 2015 17:35:13 -0700 Subject: [PATCH 137/720] gtid_replication parameter changed to master_auto_position to keep the same variable name as the MySQL server itself --- database/mysql/mysql_replication.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index 9ee71aa6021..1bbbb1688db 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -103,7 +103,7 @@ options: master_ssl_cipher: description: - same as mysql variable - gtid_replication: + master_auto_position: descrtiption: - does the host uses GTID based replication or not possible values: 0,1 @@ -235,7 +235,7 @@ def main(): login_host=dict(default="localhost"), login_unix_socket=dict(default=None), mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]), - gtid_replication=dict(default=None, choices=['0', '1']), + master_auto_position=dict(default=None, choices=['0', '1']), master_host=dict(default=None), master_user=dict(default=None), master_password=dict(default=None), @@ -272,7 +272,7 @@ def main(): master_ssl_cert = module.params["master_ssl_cert"] master_ssl_key = module.params["master_ssl_key"] master_ssl_cipher = module.params["master_ssl_cipher"] - gtid_replication = module.params["gtid_replication"] + master_auto_position = module.params["master_auto_position"] if not mysqldb_found: module.fail_json(msg="the python mysqldb module is required") @@ -369,7 +369,7 @@ def main(): if master_ssl_cipher: chm.append("MASTER_SSL_CIPHER=%(master_ssl_cipher)s") chm_params['master_ssl_cipher'] = master_ssl_cipher - if gtid_replication: + if master_auto_position: chm.append("MASTER_AUTO_POSITION = 1") changemaster(cursor, chm, chm_params) module.exit_json(changed=True) From b4ad53224882fc18d332f9c181ebec8997ca3122 Mon Sep 17 00:00:00 2001 From: Niall Donegan Date: Wed, 15 Apr 2015 16:15:31 +0100 Subject: [PATCH 138/720] sys.exit used, but not imported. --- network/dnsimple.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/network/dnsimple.py b/network/dnsimple.py index 363a2ca24c1..4e6ae4ec57e 100755 --- a/network/dnsimple.py +++ b/network/dnsimple.py @@ -32,7 +32,7 @@ options: description: - Account API token. See I(account_email) for info. 
required: false - default: null + default: null domain: description: @@ -67,7 +67,7 @@ options: default: 3600 (one hour) value: - description: + description: - Record value - "Must be specified when trying to ensure a record exists" required: false @@ -130,12 +130,13 @@ EXAMPLES = ''' ''' import os +from sys import exit try: from dnsimple import DNSimple from dnsimple.dnsimple import DNSimpleException except ImportError: print "failed=True msg='dnsimple required for this module'" - sys.exit(1) + exit(1) def main(): module = AnsibleModule( @@ -148,7 +149,7 @@ def main(): type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']), ttl = dict(required=False, default=3600, type='int'), value = dict(required=False), - priority = dict(required=False, type='int'), + priority = dict(required=False, type='int'), state = dict(required=False, choices=['present', 'absent']), solo = dict(required=False, type='bool'), ), From b125d2685a0619435530a3267ae800ff7dcf6ebc Mon Sep 17 00:00:00 2001 From: Niall Donegan Date: Wed, 15 Apr 2015 17:07:18 +0100 Subject: [PATCH 139/720] Handle missing module with fail_json --- network/dnsimple.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/network/dnsimple.py b/network/dnsimple.py index 4e6ae4ec57e..9aa52172f19 100755 --- a/network/dnsimple.py +++ b/network/dnsimple.py @@ -130,13 +130,12 @@ EXAMPLES = ''' ''' import os -from sys import exit try: from dnsimple import DNSimple from dnsimple.dnsimple import DNSimpleException + HAS_DNSIMPLE = True except ImportError: - print "failed=True msg='dnsimple required for this module'" - exit(1) + HAS_DNSIMPLE = False def main(): module = AnsibleModule( @@ -159,6 +158,9 @@ def main(): supports_check_mode = True, ) + if not HAS_DNSIMPLE: + module.fail_json("dnsimple required for this module") + account_email = module.params.get('account_email') account_api_token = module.params.get('account_api_token') domain = module.params.get('domain') From c2ae5d8e4cfde2b68e02b7452d6b2718571c0c65 Mon Sep 17 00:00:00 2001 From: Balazs Pocze Date: Wed, 15 Apr 2015 13:45:26 -0700 Subject: [PATCH 140/720] Changes made as modifying master_auto_position parameter type to bool --- database/mysql/mysql_replication.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index 1bbbb1688db..f61a6c09dd1 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -106,7 +106,6 @@ options: master_auto_position: descrtiption: - does the host uses GTID based replication or not - possible values: 0,1 ''' EXAMPLES = ''' @@ -235,7 +234,7 @@ def main(): login_host=dict(default="localhost"), login_unix_socket=dict(default=None), mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]), - master_auto_position=dict(default=None, choices=['0', '1']), + master_auto_position=dict(default=False, type='bool'), master_host=dict(default=None), master_user=dict(default=None), master_password=dict(default=None), From 698098ae5720bf940a64643dc39c0faa37a88781 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Apr 2015 09:45:23 -0400 Subject: [PATCH 141/720] doc update for zypper --- packaging/os/zypper.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index 5daec8d1429..ccf901d4fa1 100644 --- a/packaging/os/zypper.py +++ 
b/packaging/os/zypper.py @@ -56,6 +56,7 @@ options: required: false choices: [ package, patch, pattern, product, srcpackage ] default: "package" + version_added: "2.0" disable_gpg_check: description: - Whether to disable to GPG signature checking of the package From c7853c9f0891a048486f68dfd4c1fc2d107da8bd Mon Sep 17 00:00:00 2001 From: RJ Nowling Date: Fri, 17 Apr 2015 14:48:53 -0500 Subject: [PATCH 142/720] Issue #403: Add support for multiple bricks to gluster_volume --- system/gluster_volume.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index c1607f627c3..b27d41ca342 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -66,7 +66,7 @@ options: required: false default: null description: - - Brick path on servers + - Brick path on servers. Multiple bricks can be specified by commas start_on_create: choices: [ 'yes', 'no'] required: false @@ -256,7 +256,7 @@ def probe_all_peers(hosts, peers, myhostname): if myhostname != host: probe(host) -def create_volume(name, stripe, replica, transport, hosts, brick, force): +def create_volume(name, stripe, replica, transport, hosts, bricks, force): args = [ 'volume', 'create' ] args.append(name) if stripe: @@ -267,8 +267,9 @@ def create_volume(name, stripe, replica, transport, hosts, brick, force): args.append(str(replica)) args.append('transport') args.append(transport) - for host in hosts: - args.append(('%s:%s' % (host, brick))) + for brick in bricks: + for host in hosts: + args.append(('%s:%s' % (host, brick))) if force: args.append('force') run_gluster(args) @@ -329,7 +330,7 @@ def main(): action = module.params['state'] volume_name = module.params['name'] cluster= module.params['cluster'] - brick_path = module.params['brick'] + brick_paths = module.params['brick'] stripes = module.params['stripes'] replicas = module.params['replicas'] transport = module.params['transport'] @@ -341,6 +342,11 @@ def main(): if not myhostname: myhostname = socket.gethostname() + if brick_paths != None and "," in brick_paths: + brick_paths = brick_paths.split(",") + else: + brick_paths = [brick_paths] + options = module.params['options'] quota = module.params['quota'] directory = module.params['directory'] @@ -364,7 +370,7 @@ def main(): # create if it doesn't exist if volume_name not in volumes: - create_volume(volume_name, stripes, replicas, transport, cluster, brick_path, force) + create_volume(volume_name, stripes, replicas, transport, cluster, brick_paths, force) volumes = get_volumes() changed = True @@ -378,10 +384,11 @@ def main(): removed_bricks = [] all_bricks = [] for node in cluster: - brick = '%s:%s' % (node, brick_path) - all_bricks.append(brick) - if brick not in volumes[volume_name]['bricks']: - new_bricks.append(brick) + for brick_path in brick_paths: + brick = '%s:%s' % (node, brick_path) + all_bricks.append(brick) + if brick not in volumes[volume_name]['bricks']: + new_bricks.append(brick) # this module does not yet remove bricks, but we check those anyways for brick in volumes[volume_name]['bricks']: From 20d998a6d680d94fa9146ee590fd33d0dd5609d3 Mon Sep 17 00:00:00 2001 From: RJ Nowling Date: Fri, 17 Apr 2015 14:59:10 -0500 Subject: [PATCH 143/720] Improve documentation for gluster_volume brick parameter --- system/gluster_volume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index b27d41ca342..3af54e8d551 100644 --- a/system/gluster_volume.py +++ 
b/system/gluster_volume.py @@ -66,7 +66,7 @@ options: required: false default: null description: - - Brick path on servers. Multiple bricks can be specified by commas + - Brick path on servers. Multiple brick paths can be separated by commas start_on_create: choices: [ 'yes', 'no'] required: false From 2535a4928423b783ee1a8df939253e18abd545ea Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 18 Apr 2015 21:50:06 -0400 Subject: [PATCH 144/720] fixed 2.4 compatibility --- system/known_hosts.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/system/known_hosts.py b/system/known_hosts.py index b332528ed19..30ea7755553 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -128,18 +128,23 @@ def enforce_state(module, params): module.fail_json(msg="Failed to read %s: %s" % \ (path,str(e))) try: - outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path), - delete=False) + outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path)) if inf is not None: for line in inf: outf.write(line) inf.close() outf.write(key) - outf.close() + outf.flush() module.atomic_move(outf.name,path) except (IOError,OSError),e: module.fail_json(msg="Failed to write to file %s: %s" % \ (path,str(e))) + + try: + outf.close() + except: + pass + params['changed'] = True return params @@ -162,16 +167,20 @@ def sanity_check(module,host,key,sshkeygen): #The approach is to write the key to a temporary file, #and then attempt to look up the specified host in that file. try: - outf=tempfile.NamedTemporaryFile(delete=False) + outf=tempfile.NamedTemporaryFile() outf.write(key) - outf.close() + outf.flush() except IOError,e: module.fail_json(msg="Failed to write to temporary file %s: %s" % \ (outf.name,str(e))) rc,stdout,stderr=module.run_command([sshkeygen,'-F',host, '-f',outf.name], check_rc=True) - os.remove(outf.name) + try: + outf.close() + except: + pass + if stdout=='': #host not found module.fail_json(msg="Host parameter does not match hashed host field in supplied key") From 9d720f52c805e3843fca49926127de7f94afff31 Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Sat, 4 Apr 2015 19:27:37 +0100 Subject: [PATCH 145/720] Add an accept_licenses option to pkg5 module This accepts any software licences required by the package. --- packaging/os/pkg5.py | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index eea860e7be2..0e4c565b90e 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -39,6 +39,13 @@ options: required: false default: present choices: [ present, latest, absent ] + accept_licenses: + description: + - Accept any licences. 
+ required: false + default: false + choices: [ true, false ] + aliases: [ accept_licences, accept ] ''' EXAMPLES = ''' # Install Vim: @@ -70,6 +77,11 @@ def main(): 'removed', ] ), + accept_licenses=dict( + choices=BOOLEANS, + default=False + aliases=['accept_licences', 'accept'] + ), ) ) @@ -89,14 +101,14 @@ def main(): packages.append(fragment) if params['state'] in ['present', 'installed']: - ensure(module, 'present', packages) + ensure(module, 'present', packages, params) elif params['state'] in ['latest']: - ensure(module, 'latest', packages) + ensure(module, 'latest', packages, params) elif params['state'] in ['absent', 'uninstalled', 'removed']: - ensure(module, 'absent', packages) + ensure(module, 'absent', packages, params) -def ensure(module, state, packages): +def ensure(module, state, packages, params): response = { 'results': [], 'msg': '', @@ -119,7 +131,13 @@ def ensure(module, state, packages): to_modify = filter(behaviour[state]['filter'], packages) if to_modify: rc, out, err = module.run_command( - ['pkg', behaviour[state]['subcommand'], '-q', '--'] + to_modify + [ + 'pkg', behaviour[state]['subcommand'] + ] + + (['--accept'] if params['accept_licenses'] else []) + + [ + '-q', '--' + ] + to_modify ) response['rc'] = rc response['results'].append(out) From ae34395f01fa3ef5df1ad38895a260c849f65074 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Sun, 19 Apr 2015 15:29:30 -0400 Subject: [PATCH 146/720] Fixes bug where state=absent did not work --- system/gluster_volume.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index c1607f627c3..2e45a58cc8a 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -356,7 +356,9 @@ def main(): # do the work! if action == 'absent': if volume_name in volumes: - run_gluster([ 'volume', 'delete', name ]) + if volumes[volume_name]['status'].lower() != 'stopped': + stop_volume(volume_name) + run_gluster_yes([ 'volume', 'delete', volume_name ]) changed = True if action == 'present': From 6d66beb7936fc13b4b873d88504fdda067bab87b Mon Sep 17 00:00:00 2001 From: RJ Nowling Date: Mon, 20 Apr 2015 09:13:52 -0500 Subject: [PATCH 147/720] Change 'brick' to 'bricks' and add 'brick' as an alias. Add example with multiple bricks. --- system/gluster_volume.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 3af54e8d551..37dc6bbc27d 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -62,11 +62,11 @@ options: default: 'tcp' description: - Transport type for volume - brick: + bricks: required: false default: null description: - - Brick path on servers. Multiple brick paths can be separated by commas + - Brick paths on servers. 
Multiple brick paths can be separated by commas start_on_create: choices: [ 'yes', 'no'] required: false @@ -107,7 +107,7 @@ author: Taneli Leppä EXAMPLES = """ - name: create gluster volume - gluster_volume: state=present name=test1 brick=/bricks/brick1/g1 rebalance=yes cluster:"{{ play_hosts }}" + gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster:"{{ play_hosts }}" run_once: true - name: tune @@ -124,6 +124,10 @@ EXAMPLES = """ - name: remove gluster volume gluster_volume: state=absent name=test1 + +- name: create gluster volume with multiple bricks + gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster:"{{ play_hosts }}" + run_once: true """ import shutil @@ -312,7 +316,7 @@ def main(): stripes=dict(required=False, default=None, type='int'), replicas=dict(required=False, default=None, type='int'), transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]), - brick=dict(required=False, default=None), + bricks=dict(required=False, default=None, aliases=['brick']), start_on_create=dict(required=False, default=True, type='bool'), rebalance=dict(required=False, default=False, type='bool'), options=dict(required=False, default={}, type='dict'), From 3c9131b09c3ca16d25a9340b965bdd4df70b8325 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Apr 2015 10:20:50 -0400 Subject: [PATCH 148/720] added aliases docs --- system/gluster_volume.py | 1 + 1 file changed, 1 insertion(+) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index af4a70b7094..2a8bc74df72 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -67,6 +67,7 @@ options: default: null description: - Brick paths on servers. Multiple brick paths can be separated by commas + aliases: ['brick'] start_on_create: choices: [ 'yes', 'no'] required: false From cb05f0834b522c02745007fd08c4d184b8ce2e95 Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Mon, 20 Apr 2015 19:52:31 +0100 Subject: [PATCH 149/720] Fix typo. --- packaging/os/pkg5.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index 0e4c565b90e..b250a02850c 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -79,8 +79,8 @@ def main(): ), accept_licenses=dict( choices=BOOLEANS, - default=False - aliases=['accept_licences', 'accept'] + default=False, + aliases=['accept_licences', 'accept'], ), ) ) From 7458cdd72253b5dcdbd8fa892b722deb7ee1b69b Mon Sep 17 00:00:00 2001 From: Joseph Callen Date: Tue, 21 Apr 2015 10:38:40 -0400 Subject: [PATCH 150/720] New VMware Module to support adding a datacenter --- cloud/vmware_datacenter.py | 175 +++++++++++++++++++++++++++++++++++++ 1 file changed, 175 insertions(+) create mode 100644 cloud/vmware_datacenter.py diff --git a/cloud/vmware_datacenter.py b/cloud/vmware_datacenter.py new file mode 100644 index 00000000000..c3125760484 --- /dev/null +++ b/cloud/vmware_datacenter.py @@ -0,0 +1,175 @@ +#!/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: vmware_datacenter +short_description: Create VMware vSphere Datacenter +description: + - Create VMware vSphere Datacenter +version_added: 2.0 +author: Joseph Callen +notes: +requirements: + - Tested on vSphere 5.5 + - PyVmomi installed +options: + hostname: + description: + - The hostname or IP address of the vSphere vCenter + required: True + version_added: 2.0 + username: + description: + - The username of the vSphere vCenter + required: True + aliases: ['user', 'admin'] + version_added: 2.0 + password: + description: + - The password of the vSphere vCenter + required: True + aliases: ['pass', 'pwd'] + version_added: 2.0 + datacenter_name: + description: + - The name of the datacenter the cluster will be created in. + required: True + version_added: 2.0 +''' + +EXAMPLES = ''' +# Example vmware_datacenter command from Ansible Playbooks +- name: Create Datacenter + local_action: > + vmware_datacenter + hostname="{{ ansible_ssh_host }}" username=root password=vmware + datacenter_name="datacenter" +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def state_create_datacenter(module): + datacenter_name = module.params['datacenter_name'] + content = module.params['content'] + changed = True + datacenter = None + + folder = content.rootFolder + + try: + if not module.check_mode: + datacenter = folder.CreateDatacenter(name=datacenter_name) + module.exit_json(changed=changed, result=str(datacenter)) + except vim.fault.DuplicateName: + module.fail_json(msg="A datacenter with the name %s already exists" % datacenter_name) + except vim.fault.InvalidName: + module.fail_json(msg="%s is an invalid name for a cluster" % datacenter_name) + except vmodl.fault.NotSupported: + # This should never happen + module.fail_json(msg="Trying to create a datacenter on an incorrect folder object") + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + + +def check_datacenter_state(module): + datacenter_name = module.params['datacenter_name'] + + try: + content = connect_to_api(module) + datacenter = find_datacenter_by_name(content, datacenter_name) + module.params['content'] = content + + if datacenter is None: + return 'absent' + else: + module.params['datacenter'] = datacenter + return 'present' + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + + +def state_destroy_datacenter(module): + datacenter = module.params['datacenter'] + changed = True + result = None + + try: + if not module.check_mode: + task = datacenter.Destroy_Task() + changed, result = wait_for_task(task) + module.exit_json(changed=changed, result=result) + except vim.fault.VimFault as vim_fault: + module.fail_json(msg=vim_fault.msg) + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + + +def state_update_datacenter(module): + module.exit_json(changed=False, msg="Currently Not Implemented") + + +def state_exit_unchanged(module): + module.exit_json(changed=False) + + +def main(): + + argument_spec = vmware_argument_spec() + 
argument_spec.update(dict(datacenter_name=dict(required=True, type='str'), + state=dict(default='present', choices=['present', 'absent'], type='str'))) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + datacenter_states = { + 'absent': { + 'present': state_destroy_datacenter, + 'absent': state_exit_unchanged, + }, + 'present': { + 'update': state_update_datacenter, + 'present': state_exit_unchanged, + 'absent': state_create_datacenter, + } + } + desired_state = module.params['state'] + current_state = check_datacenter_state(module) + + datacenter_states[desired_state][current_state](module) + + +from ansible.module_utils.basic import * +from ansible.module_utils.vmware import * + +if __name__ == '__main__': + main() From 1fa73cd0a3ce99cefbc4712e2a562cf19296d51a Mon Sep 17 00:00:00 2001 From: Doug Luce Date: Tue, 10 Mar 2015 18:03:20 -0700 Subject: [PATCH 151/720] Add the cronvar module. This manages environment variables in Vixie crontabs. It includes addition/removal/replacement of variables and ordering via the insertbefore/insertafter parameters. --- system/cronvar.py | 430 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 430 insertions(+) create mode 100755 system/cronvar.py diff --git a/system/cronvar.py b/system/cronvar.py new file mode 100755 index 00000000000..23a626472c3 --- /dev/null +++ b/system/cronvar.py @@ -0,0 +1,430 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Cronvar Plugin: The goal of this plugin is to provide an indempotent +# method for set cron variable values. It should play well with the +# existing cron module as well as allow for manually added variables. +# Each variable entered will be preceded with a comment describing the +# variable so that it can be found later. This is required to be +# present in order for this plugin to find/modify the variable +# +# This module is based on the crontab module. +# + +DOCUMENTATION = """ +--- +module: cronvar +short_description: Manage variables in crontabs +description: + - Use this module to manage crontab variables. This module allows + you to create, update, or delete cron variable definitions. +version_added: "2.0" +options: + name: + description: + - Name of the crontab variable. + default: null + required: true + value: + description: + - The value to set this variable to. Required if state=present. + required: false + default: null + insertafter: + required: false + default: null + description: + - Used with C(state=present). If specified, the variable will be inserted + after the variable specified. + insertbefore: + required: false + default: null + description: + - Used with C(state=present). If specified, the variable will be inserted + just before the variable specified. 
+ state: + description: + - Whether to ensure that the variable is present or absent. + required: false + default: present + choices: [ "present", "absent" ] + user: + description: + - The specific user whose crontab should be modified. + required: false + default: root + cron_file: + description: + - If specified, uses this file in cron.d instead of an individual user's crontab. + required: false + default: null + backup: + description: + - If set, create a backup of the crontab before it is modified. + The location of the backup is returned in the C(backup) variable by this module. + required: false + default: false +requirements: + - cron +author: Doug Luce +""" + +EXAMPLES = ''' +# Ensure a variable exists. +# Creates an entry like "EMAIL=doug@ansibmod.con.com" +- cronvar: name="EMAIL" value="doug@ansibmod.con.com" + +# Make sure a variable is gone. This will remove any variable named +# "LEGACY" +- cronvar: name="LEGACY" state=absent + +# Adds a variable to a file under /etc/cron.d +- cronvar: name="LOGFILE" value="/var/log/yum-autoupdate.log" + user="root" cron_file=ansible_yum-autoupdate +''' + +import os +import re +import tempfile +import platform +import pipes +import shlex + +CRONCMD = "/usr/bin/crontab" + +class CronVarError(Exception): + pass + +class CronVar(object): + """ + CronVar object to write variables to crontabs. + + user - the user of the crontab (defaults to root) + cron_file - a cron file under /etc/cron.d + """ + def __init__(self, module, user=None, cron_file=None): + self.module = module + self.user = user + if self.user is None: + self.user = 'root' + self.lines = None + self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"', )) + # select whether we dump additional debug info through syslog + self.syslogging = False + + if cron_file: + self.cron_file = '/etc/cron.d/%s' % cron_file + else: + self.cron_file = None + + self.read() + + def read(self): + # Read in the crontab from the system + self.lines = [] + if self.cron_file: + # read the cronfile + try: + f = open(self.cron_file, 'r') + self.lines = f.read().splitlines() + f.close() + except IOError, e: + # cron file does not exist + return + except: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + else: + # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME + (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) + + if rc != 0 and rc != 1: # 1 can mean that there are no jobs. + raise CronVarError("Unable to read crontab") + + lines = out.splitlines() + count = 0 + for l in lines: + if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and + not re.match( r'# \(/tmp/.*installed on.*\)', l) and + not re.match( r'# \(.*version.*\)', l)): + self.lines.append(l) + count += 1 + + def log_message(self, message): + if self.syslogging: + syslog.syslog(syslog.LOG_NOTICE, 'ansible: "%s"' % message) + + def write(self, backup_file=None): + """ + Write the crontab to the system. Saves all information. 
+ """ + if backup_file: + fileh = open(backup_file, 'w') + elif self.cron_file: + fileh = open(self.cron_file, 'w') + else: + filed, path = tempfile.mkstemp(prefix='crontab') + fileh = os.fdopen(filed, 'w') + + fileh.write(self.render()) + fileh.close() + + # return if making a backup + if backup_file: + return + + # Add the entire crontab back to the user crontab + if not self.cron_file: + # quoting shell args for now but really this should be two non-shell calls. FIXME + (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) + os.unlink(path) + + if rc != 0: + self.module.fail_json(msg=err) + + def remove_variable_file(self): + try: + os.unlink(self.cron_file) + return True + except OSError, e: + # cron file does not exist + return False + except: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + + def parse_for_var(self, line): + lexer = shlex.shlex(line) + lexer.wordchars = self.wordchars + varname = lexer.get_token() + is_env_var = lexer.get_token() == '=' + value = ''.join(lexer) + if is_env_var: + return (varname, value) + raise CronVarError("Not a variable.") + + def find_variable(self, name): + comment = None + for l in self.lines: + try: + (varname, value) = self.parse_for_var(l) + if varname == name: + return value + except CronVarError: + pass + return None + + def get_var_names(self): + var_names = [] + for l in self.lines: + try: + (var_name, _) = self.parse_for_var(l) + var_names.append(var_name) + except CronVarError: + pass + return var_names + + def add_variable(self, name, value, insertbefore, insertafter): + if insertbefore is None and insertafter is None: + # Add the variable to the top of the file. + self.lines.insert(0, "%s=%s" % (name, value)) + else: + newlines = [] + for l in self.lines: + try: + (varname, _) = self.parse_for_var(l) # Throws if not a var line + if varname == insertbefore: + newlines.append("%s=%s" % (name, value)) + newlines.append(l) + elif varname == insertafter: + newlines.append(l) + newlines.append("%s=%s" % (name, value)) + else: + raise CronVarError # Append. + except CronVarError: + newlines.append(l) + + self.lines = newlines + + def remove_variable(self, name): + self.update_variable(name, None, remove=True) + + def update_variable(self, name, value, remove=False): + newlines = [] + for l in self.lines: + try: + (varname, _) = self.parse_for_var(l) # Throws if not a var line + if varname != name: + raise CronVarError # Append. 
+ if not remove: + newlines.append("%s=%s" % (name, value)) + except CronVarError: + newlines.append(l) + + self.lines = newlines + + def render(self): + """ + Render a proper crontab + """ + result = '\n'.join(self.lines) + if result and result[-1] not in ['\n', '\r']: + result += '\n' + return result + + def _read_user_execute(self): + """ + Returns the command line for reading a crontab + """ + user = '' + + if self.user: + if platform.system() == 'SunOS': + return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD)) + elif platform.system() == 'AIX': + return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user)) + elif platform.system() == 'HP-UX': + return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user)) + else: + user = '-u %s' % pipes.quote(self.user) + return "%s %s %s" % (CRONCMD , user, '-l') + + def _write_execute(self, path): + """ + Return the command line for writing a crontab + """ + user = '' + if self.user: + if platform.system() in ['SunOS', 'HP-UX', 'AIX']: + return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path)) + else: + user = '-u %s' % pipes.quote(self.user) + return "%s %s %s" % (CRONCMD , user, pipes.quote(path)) + +#================================================== + +def main(): + # The following example playbooks: + # + # - cronvar: name="SHELL" value="/bin/bash" + # + # - name: Set the email + # cronvar: name="EMAILTO" value="doug@ansibmod.con.com" + # + # - name: Get rid of the old new host variable + # cronvar: name="NEW_HOST" state=absent + # + # Would produce: + # SHELL = /bin/bash + # EMAILTO = doug@ansibmod.con.com + + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + value=dict(required=False), + user=dict(required=False), + cron_file=dict(required=False), + insertafter=dict(default=None), + insertbefore=dict(default=None), + state=dict(default='present', choices=['present', 'absent']), + backup=dict(default=False, type='bool'), + ), + mutually_exclusive=[['insertbefore', 'insertafter']], + supports_check_mode=False, + ) + + name = module.params['name'] + value = module.params['value'] + user = module.params['user'] + cron_file = module.params['cron_file'] + insertafter = module.params['insertafter'] + insertbefore = module.params['insertbefore'] + state = module.params['state'] + backup = module.params['backup'] + ensure_present = state == 'present' + + changed = False + res_args = dict() + + # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option. 
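+    # (022 is an octal mask: it clears the group and other write bits, so any
+    # file created from here on, e.g. under /etc/cron.d, stays writable only
+    # by its owner.)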
+ os.umask(022) + cronvar = CronVar(module, user, cron_file) + + if cronvar.syslogging: + syslog.openlog('ansible-%s' % os.path.basename(__file__)) + syslog.syslog(syslog.LOG_NOTICE, 'cronvar instantiated - name: "%s"' % name) + + # --- user input validation --- + + if name is None and ensure_present: + module.fail_json(msg="You must specify 'name' to insert a new cron variabale") + + if value is None and ensure_present: + module.fail_json(msg="You must specify 'value' to insert a new cron variable") + + if name is None and not ensure_present: + module.fail_json(msg="You must specify 'name' to remove a cron variable") + + # if requested make a backup before making a change + if backup: + (_, backup_file) = tempfile.mkstemp(prefix='cronvar') + cronvar.write(backup_file) + + if cronvar.cron_file and not name and not ensure_present: + changed = cronvar.remove_job_file() + module.exit_json(changed=changed, cron_file=cron_file, state=state) + + old_value = cronvar.find_variable(name) + + if ensure_present: + if old_value is None: + cronvar.add_variable(name, value, insertbefore, insertafter) + changed = True + elif old_value != value: + cronvar.update_variable(name, value) + changed = True + else: + if old_value is not None: + cronvar.remove_variable(name) + changed = True + + res_args = { + "vars": cronvar.get_var_names(), + "changed": changed + } + + if changed: + cronvar.write() + + # retain the backup only if crontab or cron file have changed + if backup: + if changed: + res_args['backup_file'] = backup_file + else: + os.unlink(backup_file) + + if cron_file: + res_args['cron_file'] = cron_file + + module.exit_json(**res_args) + + # --- should never get here + module.exit_json(msg="Unable to execute cronvar task.") + +# import module snippets +from ansible.module_utils.basic import * + +main() From 3148aafa17c64b002693dc3f41216a87b4456b83 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 22 Apr 2015 10:18:34 -0400 Subject: [PATCH 152/720] moved new module to proper subdir fixed doc issues minor code adjustments --- cloud/vmware/__init__.py | 0 cloud/{ => vmware}/vmware_datacenter.py | 36 ++++++++++++------------- 2 files changed, 18 insertions(+), 18 deletions(-) create mode 100644 cloud/vmware/__init__.py rename cloud/{ => vmware}/vmware_datacenter.py (88%) diff --git a/cloud/vmware/__init__.py b/cloud/vmware/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/vmware_datacenter.py b/cloud/vmware/vmware_datacenter.py similarity index 88% rename from cloud/vmware_datacenter.py rename to cloud/vmware/vmware_datacenter.py index c3125760484..35cf7fa4692 100644 --- a/cloud/vmware_datacenter.py +++ b/cloud/vmware/vmware_datacenter.py @@ -1,4 +1,4 @@ -#!/bin/python +#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Joseph Callen @@ -21,38 +21,39 @@ DOCUMENTATION = ''' --- module: vmware_datacenter -short_description: Create VMware vSphere Datacenter +short_description: Manage VMware vSphere Datacenters description: - - Create VMware vSphere Datacenter + - Manage VMware vSphere Datacenters version_added: 2.0 author: Joseph Callen notes: -requirements: - Tested on vSphere 5.5 - - PyVmomi installed +requirements: + - PyVmomi options: - hostname: + hostname: description: - - The hostname or IP address of the vSphere vCenter + - The hostname or IP address of the vSphere vCenter API server required: True - version_added: 2.0 username: description: - The username of the vSphere vCenter required: True aliases: ['user', 'admin'] - version_added: 2.0 password: 
description: - The password of the vSphere vCenter required: True aliases: ['pass', 'pwd'] - version_added: 2.0 datacenter_name: description: - The name of the datacenter the cluster will be created in. required: True - version_added: 2.0 + state: + description: + - If the datacenter should be present or absent + choices: ['present', 'absent'] + required: True ''' EXAMPLES = ''' @@ -133,10 +134,6 @@ def state_destroy_datacenter(module): module.fail_json(msg=method_fault.msg) -def state_update_datacenter(module): - module.exit_json(changed=False, msg="Currently Not Implemented") - - def state_exit_unchanged(module): module.exit_json(changed=False) @@ -144,8 +141,12 @@ def state_exit_unchanged(module): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict(datacenter_name=dict(required=True, type='str'), - state=dict(default='present', choices=['present', 'absent'], type='str'))) + argument_spec.update( + dict( + datacenter_name=dict(required=True, type='str'), + state=dict(required=True, choices=['present', 'absent'], type='str'), + ) + ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if not HAS_PYVMOMI: @@ -157,7 +158,6 @@ def main(): 'absent': state_exit_unchanged, }, 'present': { - 'update': state_update_datacenter, 'present': state_exit_unchanged, 'absent': state_create_datacenter, } From a56c8ebff19a1c6ccc1d801e04e344cae6322b0a Mon Sep 17 00:00:00 2001 From: Matt Jaynes Date: Wed, 22 Apr 2015 21:14:47 +0200 Subject: [PATCH 153/720] Document 'msg' param and fix examples The 'msg' alias for 'subject' isn't in the documentation, so adding it. In the gmail example, it uses both the 'subject' and 'msg' params, but 'msg' is an alias of 'subject', so you are essentially declaring the same param twice. If you use this example, then no subject is sent (I tested with gmail). Documentation example is updated to use 'body' as intended. Also, updated the simple example to use 'subject' instead of the 'msg' alias since it is more explicit. --- notification/mail.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/notification/mail.py b/notification/mail.py index a1ec44087dd..10ef61ef2be 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -61,7 +61,7 @@ options: required: false subject: description: - - The subject of the email being sent. + - The subject of the email being sent. Alias: I(msg) aliases: [ msg ] required: true body: @@ -115,7 +115,7 @@ options: EXAMPLES = ''' # Example playbook sending mail to root -- local_action: mail msg='System {{ ansible_hostname }} has been successfully provisioned.' +- local_action: mail subject='System {{ ansible_hostname }} has been successfully provisioned.' # Sending an e-mail using Gmail SMTP servers - local_action: mail @@ -125,7 +125,7 @@ EXAMPLES = ''' password='mysecret' to="John Smith " subject='Ansible-report' - msg='System {{ ansible_hostname }} has been successfully provisioned.' + body='System {{ ansible_hostname }} has been successfully provisioned.' 
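The mix-up this change corrects is easiest to see in the SMTP terms the module ultimately works in: subject (with msg as a bare alias) becomes a message header, while body becomes the payload, so passing the text as msg sends a mail with an empty body. A standalone sketch of that mapping using plain smtplib (host and addresses are placeholders, not taken from the module):

    import smtplib
    from email.mime.text import MIMEText

    mail = MIMEText('System web01 has been successfully provisioned.')  # the body
    mail['Subject'] = 'Ansible-report'                                  # the subject
    mail['From'] = 'root@example.com'
    mail['To'] = 'john.smith@example.com'

    server = smtplib.SMTP('smtp.example.com', 25)
    server.sendmail('root@example.com', ['john.smith@example.com'], mail.as_string())
    server.quit()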
# Send e-mail to a bunch of users, attaching files - local_action: mail From 39028d6cefc9617da0585c93f7d11f3968e09647 Mon Sep 17 00:00:00 2001 From: Matt Jaynes Date: Wed, 22 Apr 2015 22:23:07 +0200 Subject: [PATCH 154/720] Remove mentions of 'msg' alias --- notification/mail.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/notification/mail.py b/notification/mail.py index 10ef61ef2be..ae33c5ca4ca 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -61,8 +61,7 @@ options: required: false subject: description: - - The subject of the email being sent. Alias: I(msg) - aliases: [ msg ] + - The subject of the email being sent. required: true body: description: From 91483bdd6b9a3dd0c0ad047a1209801068afcb27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Henrik=20Wallstro=CC=88m?= Date: Fri, 24 Apr 2015 10:48:02 +0200 Subject: [PATCH 155/720] Modules to manage IIS Wraps the Web Server Administration module for PowerShell into Ansible modules. --- windows/win_iis_virtualdirectory.ps1 | 128 +++++++++++++++++++ windows/win_iis_virtualdirectory.py | 67 ++++++++++ windows/win_iis_webapplication.ps1 | 132 ++++++++++++++++++++ windows/win_iis_webapplication.py | 68 ++++++++++ windows/win_iis_webapppool.ps1 | 112 +++++++++++++++++ windows/win_iis_webapppool.py | 112 +++++++++++++++++ windows/win_iis_webbinding.ps1 | 138 +++++++++++++++++++++ windows/win_iis_webbinding.py | 143 +++++++++++++++++++++ windows/win_iis_website.ps1 | 179 +++++++++++++++++++++++++++ windows/win_iis_website.py | 133 ++++++++++++++++++++ 10 files changed, 1212 insertions(+) create mode 100644 windows/win_iis_virtualdirectory.ps1 create mode 100644 windows/win_iis_virtualdirectory.py create mode 100644 windows/win_iis_webapplication.ps1 create mode 100644 windows/win_iis_webapplication.py create mode 100644 windows/win_iis_webapppool.ps1 create mode 100644 windows/win_iis_webapppool.py create mode 100644 windows/win_iis_webbinding.ps1 create mode 100644 windows/win_iis_webbinding.py create mode 100644 windows/win_iis_website.ps1 create mode 100644 windows/win_iis_website.py diff --git a/windows/win_iis_virtualdirectory.ps1 b/windows/win_iis_virtualdirectory.ps1 new file mode 100644 index 00000000000..3f2ab692b42 --- /dev/null +++ b/windows/win_iis_virtualdirectory.ps1 @@ -0,0 +1,128 @@ +#!powershell +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
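+# (The WANT_JSON / POWERSHELL_COMMON markers just below are functional, not
+#  decorative: the module framework of this era scans for them and splices in
+#  the shared PowerShell helpers, such as Parse-Args, Get-Attr, Fail-Json and
+#  Exit-Json, that the rest of this script relies on.)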
+ +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# Site +$site = Get-Attr $params "site" $FALSE; +If ($site -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: site"; +} + +# Application +$application = Get-Attr $params "application" $FALSE; + +# State parameter +$state = Get-Attr $params "state" "present"; +If (($state -ne 'present') -and ($state -ne 'absent')) { + Fail-Json $result "state is '$state'; must be 'present' or 'absent'" +} + +# Path parameter +$physical_path = Get-Attr $params "physical_path" $FALSE; + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) { + Import-Module WebAdministration +} + +# Result +$result = New-Object psobject @{ + directory = New-Object psobject + changed = $false +}; + +# Construct path +$directory_path = if($application) { + "IIS:\Sites\$($site)\$($application)\$($name)" +} else { + "IIS:\Sites\$($site)\$($name)" +} + +# Directory info +$directory = Get-WebVirtualDirectory -Site $site -Name $name + +try { + # Add directory + If(($state -eq 'present') -and (-not $directory)) { + If ($physical_path -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required arguments: physical_path" + } + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $directory_parameters = New-Object psobject @{ + Site = $site + Name = $name + PhysicalPath = $physical_path + }; + + If ($application) { + $directory_parameters.Application = $application + } + + $directory = New-WebVirtualDirectory @directory_parameters -Force + $result.changed = $true + } + + # Remove directory + If ($state -eq 'absent' -and $directory) { + Remove-Item $directory_path + $result.changed = $true + } + + $directory = Get-WebVirtualDirectory -Site $site -Name $name + If($directory) { + + # Change Physical Path if needed + if($physical_path) { + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $vdir_folder = Get-Item $directory.PhysicalPath + $folder = Get-Item $physical_path + If($folder.FullName -ne $vdir_folder.FullName) { + Set-ItemProperty $directory_path -name physicalPath -value $physical_path + $result.changed = $true + } + } + } +} catch { + Fail-Json $result $_.Exception.Message +} + +# Result +$directory = Get-WebVirtualDirectory -Site $site -Name $name +$result.directory = New-Object psobject @{ + PhysicalPath = $directory.PhysicalPath +} + +Exit-Json $result diff --git a/windows/win_iis_virtualdirectory.py b/windows/win_iis_virtualdirectory.py new file mode 100644 index 00000000000..bbedfbbb4ab --- /dev/null +++ b/windows/win_iis_virtualdirectory.py @@ -0,0 +1,67 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: win_iis_virtualdirectory +version_added: "1.9" +short_description: Configures a IIS virtual directories. +description: + - Creates, Removes and configures a IIS Web site +options: + name: + description: + - The name of the virtual directory to create. + required: true + default: null + aliases: [] + state: + description: + - + choices: + - absent + - present + required: false + default: null + aliases: [] + site: + description: + - The site name under which the virtual directory is created or exists. + required: false + default: null + aliases: [] + application: + description: + - The application under which the virtual directory is created or exists. + required: false + default: null + aliases: [] + physical_path: + description: + - The physical path to the folder in which the new virtual directory is created. The specified folder must already exist. + required: false + default: null + aliases: [] +author: Henrik Wallström +''' + +EXAMPLES = ''' + +''' diff --git a/windows/win_iis_webapplication.ps1 b/windows/win_iis_webapplication.ps1 new file mode 100644 index 00000000000..e576dd5081c --- /dev/null +++ b/windows/win_iis_webapplication.ps1 @@ -0,0 +1,132 @@ +#!powershell + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# Site +$site = Get-Attr $params "site" $FALSE; +If ($site -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: site"; +} + +# State parameter +$state = Get-Attr $params "state" "present"; +$state.ToString().ToLower(); +If (($state -ne 'present') -and ($state -ne 'absent')) { + Fail-Json $result "state is '$state'; must be 'present' or 'absent'" +} + +# Path parameter +$physical_path = Get-Attr $params "physical_path" $FALSE; + +# Application Pool Parameter +$application_pool = Get-Attr $params "application_pool" $FALSE; + + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) { + Import-Module WebAdministration +} + +# Result +$result = New-Object psobject @{ + application = New-Object psobject + changed = $false +}; + +# Application info +$application = Get-WebApplication -Site $site -Name $name + +try { + # Add application + If(($state -eq 'present') -and (-not $application)) { + If ($physical_path -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required arguments: physical_path" + } + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $application_parameters = New-Object psobject @{ + Site = $site + Name = $name + PhysicalPath = $physical_path + }; + + If ($application_pool) { + $application_parameters.ApplicationPool = $application_pool + } + + $application = New-WebApplication @application_parameters -Force + $result.changed = $true + + } + + # Remove application + if ($state -eq 'absent' -and $application) { + $application = Remove-WebApplication -Site $site -Name $name + $result.changed = $true + } + + $application = Get-WebApplication -Site $site -Name $name + If($application) { + + # Change Physical Path if needed + if($physical_path) { + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $app_folder = Get-Item $application.PhysicalPath + $folder = Get-Item $physical_path + If($folder.FullName -ne $app_folder.FullName) { + Set-ItemProperty "IIS:\Sites\$($site)\$($name)" -name physicalPath -value $physical_path + $result.changed = $true + } + } + + # Change Application Pool if needed + if($application_pool) { + If($application_pool -ne $application.applicationPool) { + Set-ItemProperty "IIS:\Sites\$($site)\$($name)" -name applicationPool -value $application_pool + $result.changed = $true + } + } + } +} catch { + Fail-Json $result $_.Exception.Message +} + +# Result +$application = Get-WebApplication -Site $site -Name $name +$result.application = New-Object psobject @{ + PhysicalPath = $application.PhysicalPath + ApplicationPool = $application.applicationPool +} + +Exit-Json $result diff --git a/windows/win_iis_webapplication.py b/windows/win_iis_webapplication.py new file mode 100644 index 00000000000..d8a59b66054 --- /dev/null +++ b/windows/win_iis_webapplication.py @@ -0,0 +1,68 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the 
License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: win_iis_webapplication
+version_added: "1.9"
+short_description: Configures an IIS Web application.
+description:
+     - Creates, removes and configures an IIS Web application.
+options:
+  name:
+    description:
+      - Name of the Web application.
+    required: true
+    default: null
+    aliases: []
+  site:
+    description:
+      - Name of the site on which the application is created.
+    required: true
+    default: null
+    aliases: []
+  state:
+    description:
+      - State of the web application.
+    choices:
+      - present
+      - absent
+    required: false
+    default: null
+    aliases: []
+  physical_path:
+    description:
+      - The physical path on the remote host to use for the new application. The specified folder must already exist.
+    required: false
+    default: null
+    aliases: []
+  application_pool:
+    description:
+      - The application pool in which the new application executes.
+    required: false
+    default: null
+    aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+$ ansible -i hosts -m win_iis_webapplication -a "name=api site=acme physical_path=c:\\apps\\acme\\api" host
+
+'''
diff --git a/windows/win_iis_webapppool.ps1 b/windows/win_iis_webapppool.ps1
new file mode 100644
index 00000000000..2ed369e4a3f
--- /dev/null
+++ b/windows/win_iis_webapppool.ps1
@@ -0,0 +1,112 @@
+#!powershell
+
+# (c) 2015, Henrik Wallström
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
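
The win_iis_webapplication EXAMPLES section above only shows an ad-hoc invocation. As a hedged illustration (not part of the patch; the site, application name, path and pool below are hypothetical), the same call expressed as a playbook task would be roughly:

    - name: Ensure the api web application exists under the acme site
      win_iis_webapplication:
        name: api
        site: acme
        physical_path: 'c:\apps\acme\api'
        application_pool: acme
        state: present
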
+ + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# State parameter +$state = Get-Attr $params "state" $FALSE; +$valid_states = ('started', 'restarted', 'stopped', 'absent'); +If (($state -Ne $FALSE) -And ($state -NotIn $valid_states)) { + Fail-Json $result "state is '$state'; must be $($valid_states)" +} + +# Attributes parameter - Pipe separated list of attributes where +# keys and values are separated by comma (paramA:valyeA|paramB:valueB) +$attributes = @{}; +If ($params.attributes) { + $params.attributes -split '\|' | foreach { + $key, $value = $_ -split "\:"; + $attributes.Add($key, $value); + } +} + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $NULL){ + Import-Module WebAdministration +} + +# Result +$result = New-Object psobject @{ + changed = $FALSE + attributes = $attributes +}; + +# Get pool +$pool = Get-Item IIS:\AppPools\$name + +try { + # Add + if (-not $pool -and $state -in ('started', 'stopped', 'restarted')) { + New-WebAppPool $name + $result.changed = $TRUE + } + + # Remove + if ($pool -and $state -eq 'absent') { + Remove-WebAppPool $name + $result.changed = $TRUE + } + + $pool = Get-Item IIS:\AppPools\$name + if($pool) { + # Set properties + $attributes.GetEnumerator() | foreach { + $newParameter = $_; + $currentParameter = Get-ItemProperty ("IIS:\AppPools\" + $name) $newParameter.Key + if(-not $currentParameter -or ($currentParameter.Value -as [String]) -ne $newParameter.Value) { + Set-ItemProperty ("IIS:\AppPools\" + $name) $newParameter.Key $newParameter.Value + $result.changed = $TRUE + } + } + + # Set run state + if (($state -eq 'stopped') -and ($pool.State -eq 'Started')) { + Stop-WebAppPool -Name $name -ErrorAction Stop + $result.changed = $TRUE + } + if ((($state -eq 'started') -and ($pool.State -eq 'Stopped')) -or ($state -eq 'restarted')) { + Start-WebAppPool -Name $name -ErrorAction Stop + $result.changed = $TRUE + } + } +} catch { + Fail-Json $result $_.Exception.Message +} + +# Result +$pool = Get-Item IIS:\AppPools\$name +$result.info = @{ + name = $pool.Name + state = $pool.State + attributes = New-Object psobject @{} +}; + +$pool.Attributes | ForEach { $result.info.attributes.Add($_.Name, $_.Value)}; + +Exit-Json $result diff --git a/windows/win_iis_webapppool.py b/windows/win_iis_webapppool.py new file mode 100644 index 00000000000..320fe07f637 --- /dev/null +++ b/windows/win_iis_webapppool.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION = ''' +--- +module: win_iis_webapppool +version_added: "1.9" +short_description: Configures a IIS Web Application Pool. 
+description:
+     - Creates, removes and configures an IIS Web Application Pool.
+options:
+  name:
+    description:
+      - Name of the application pool.
+    required: true
+    default: null
+    aliases: []
+  state:
+    description:
+      - State of the application pool.
+    choices:
+      - absent
+      - stopped
+      - started
+      - restarted
+    required: false
+    default: null
+    aliases: []
+  attributes:
+    description:
+      - Application Pool attributes as a string in which attributes are separated by a pipe and attribute name/values by a colon, e.g. "foo:1|bar:2".
+    required: false
+    default: null
+    aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+# This returns information about an existing application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='DefaultAppPool'" windows
+host | success >> {
+    "attributes": {},
+    "changed": false,
+    "info": {
+        "attributes": {
+            "CLRConfigFile": "",
+            "applicationPoolSid": "S-1-5-82-3006700770-424185619-1745488364-794895919-4004696415",
+            "autoStart": true,
+            "enable32BitAppOnWin64": false,
+            "enableConfigurationOverride": true,
+            "managedPipelineMode": 0,
+            "managedRuntimeLoader": "webengine4.dll",
+            "managedRuntimeVersion": "v4.0",
+            "name": "DefaultAppPool",
+            "passAnonymousToken": true,
+            "queueLength": 1000,
+            "startMode": 0,
+            "state": 1
+        },
+        "name": "DefaultAppPool",
+        "state": "Started"
+    }
+}
+
+# This creates a new application pool in 'Started' state
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=started" windows
+
+# This stops an application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=stopped" windows
+
+# This restarts an application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=restarted" windows
+
+# This removes an application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=absent" windows
+
+# This changes application pool attributes without touching its state
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows
+
+# This creates an application pool and sets attributes
+$ ansible -i inventory -m win_iis_webapppool -a "name='AnotherAppPool' state=started attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows
+
+
+# Playbook example
+---
+
+- name: App Pool with .NET 4.0
+  win_iis_webapppool:
+    name: 'AppPool'
+    state: started
+    attributes: managedRuntimeVersion:v4.0
+  register: webapppool
+
+'''
diff --git a/windows/win_iis_webbinding.ps1 b/windows/win_iis_webbinding.ps1
new file mode 100644
index 00000000000..bdff43fc63c
--- /dev/null
+++ b/windows/win_iis_webbinding.ps1
@@ -0,0 +1,138 @@
+#!powershell
+
+# (c) 2015, Henrik Wallström
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
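
As an aside (not part of the patch; the pool name and attribute values are only illustrative), the pipe-separated attributes string shown in the ad-hoc win_iis_webapppool examples above can be used from a playbook in the same way:

    - name: App pool with .NET 4.0 and autoStart disabled
      win_iis_webapppool:
        name: AnotherAppPool
        state: started
        attributes: 'managedRuntimeVersion:v4.0|autoStart:false'
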
+ + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# State parameter +$state = Get-Attr $params "state" $FALSE; +$valid_states = ($FALSE, 'present', 'absent'); +If ($state -NotIn $valid_states) { + Fail-Json $result "state is '$state'; must be $($valid_states)" +} + +$binding_parameters = New-Object psobject @{ + Name = $name +}; + +If ($params.host_header) { + $binding_parameters.HostHeader = $params.host_header +} + +If ($params.protocol) { + $binding_parameters.Protocol = $params.protocol +} + +If ($params.port) { + $binding_parameters.Port = $params.port +} + +If ($params.ip) { + $binding_parameters.IPAddress = $params.ip +} + +$certificateHash = Get-Attr $params "certificate_hash" $FALSE; +$certificateStoreName = Get-Attr $params "certificate_store_name" "MY"; + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null){ + Import-Module WebAdministration +} + +function Create-Binding-Info { + return New-Object psobject @{ + "bindingInformation" = $args[0].bindingInformation + "certificateHash" = $args[0].certificateHash + "certificateStoreName" = $args[0].certificateStoreName + "isDsMapperEnabled" = $args[0].isDsMapperEnabled + "protocol" = $args[0].protocol + "sslFlags" = $args[0].sslFlags + } +} + +# Result +$result = New-Object psobject @{ + changed = $false + parameters = $binding_parameters + matched = @() + removed = @() + added = @() +}; + +# Get bindings matching parameters +$curent_bindings = Get-WebBinding @binding_parameters +$curent_bindings | Foreach { + $result.matched += Create-Binding-Info $_ +} + +try { + # Add + if (-not $curent_bindings -and $state -eq 'present') { + New-WebBinding @binding_parameters -Force + + # Select certificat + if($certificateHash -ne $FALSE) { + + $ip = $binding_parameters.IPAddress + if((!$ip) -or ($ip -eq "*")) { + $ip = "0.0.0.0" + } + + $port = $binding_parameters.Port + if(!$port) { + $port = 443 + } + + $result.port = $port + $result.ip = $ip + + Push-Location IIS:\SslBindings\ + Get-Item Cert:\LocalMachine\$certificateStoreName\$certificateHash | New-Item "$($ip)!$($port)" + Pop-Location + } + + $result.added += Create-Binding-Info (Get-WebBinding @binding_parameters) + $result.changed = $true + } + + # Remove + if ($curent_bindings -and $state -eq 'absent') { + $curent_bindings | foreach { + Remove-WebBinding -InputObject $_ + $result.removed += Create-Binding-Info $_ + } + $result.changed = $true + } + + +} +catch { + Fail-Json $result $_.Exception.Message +} + +Exit-Json $result diff --git a/windows/win_iis_webbinding.py b/windows/win_iis_webbinding.py new file mode 100644 index 00000000000..0cc5da158bf --- /dev/null +++ b/windows/win_iis_webbinding.py @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: win_iis_webbinding
+version_added: "1.9"
+short_description: Configures an IIS Web binding.
+description:
+     - Creates, removes and configures a binding to an existing IIS Web site.
+options:
+  name:
+    description:
+      - Name of the web site.
+    required: true
+    default: null
+    aliases: []
+  state:
+    description:
+      - State of the binding.
+    choices:
+      - present
+      - absent
+    required: false
+    default: null
+    aliases: []
+  port:
+    description:
+      - The port to bind to / use for the binding.
+    required: false
+    default: null
+    aliases: []
+  ip:
+    description:
+      - The IP address to bind to / use for the binding.
+    required: false
+    default: null
+    aliases: []
+  host_header:
+    description:
+      - The host header to bind to / use for the binding.
+    required: false
+    default: null
+    aliases: []
+  protocol:
+    description:
+      - The protocol to be used for the Web binding (usually HTTP, HTTPS, or FTP).
+    required: false
+    default: null
+    aliases: []
+  certificate_hash:
+    description:
+      - Certificate hash for the SSL binding. The certificate hash is the unique identifier for the certificate.
+    required: false
+    default: null
+    aliases: []
+  certificate_store_name:
+    description:
+      - Name of the certificate store where the certificate for the binding is located.
+    required: false
+    default: "My"
+    aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+# This will return binding information for an existing host
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site'" windows
+host | success >> {
+    "added": [],
+    "changed": false,
+    "matched": [
+        {
+            "bindingInformation": "*:80:",
+            "certificateHash": "",
+            "certificateStoreName": "",
+            "isDsMapperEnabled": false,
+            "protocol": "http",
+            "sslFlags": 0
+        }
+    ],
+    "parameters": {
+        "Name": "Default Web Site"
+    },
+    "removed": []
+}
+
+# This will return the HTTPS binding information for an existing host
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https" windows
+
+# This will return binding information for the binding on port 9090
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090" windows
+
+# This will add a HTTP binding on port 9090
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090 state=present" windows
+
+# This will remove the HTTP binding on port 9090
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090 state=absent" windows
+
+# This will add a HTTPS binding
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https state=present" windows
+
+# This will add a HTTPS binding and select the certificate to use
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https certificate_hash=B0D0FA8408FC67B230338FCA584D03792DA73F4C state=present" windows
+
+
+# Playbook example
+---
+
+- name: Website http/https bindings
+  win_iis_webbinding:
+    name: "Default Web Site"
+    protocol: https
+    port: 443
+    certificate_hash: "D1A3AF8988FD32D1A3AF8988FD323792DA73F4C"
+    state: present
+  when: monitor_use_https
+
+'''
diff --git a/windows/win_iis_website.ps1 b/windows/win_iis_website.ps1
new file mode 100644
index
00000000000..bba1e941142 --- /dev/null +++ b/windows/win_iis_website.ps1 @@ -0,0 +1,179 @@ +#!powershell + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# Name parameter +$name = Get-Attr $params "name" $FALSE; +If ($name -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: name"; +} + +# State parameter +$state = Get-Attr $params "state" $FALSE; +$state.ToString().ToLower(); +If (($state -ne $FALSE) -and ($state -ne 'started') -and ($state -ne 'stopped') -and ($state -ne 'restarted') -and ($state -ne 'absent')) { + Fail-Json (New-Object psobject) "state is '$state'; must be 'started', 'restarted', 'stopped' or 'absent'" +} + +# Path parameter +$physical_path = Get-Attr $params "physical_path" $FALSE; + +# Application Pool Parameter +$application_pool = Get-Attr $params "application_pool" $FALSE; + +# Binding Parameters +$bind_port = Get-Attr $params "port" $FALSE; +$bind_ip = Get-Attr $params "ip" $FALSE; +$bind_hostname = Get-Attr $params "hostname" $FALSE; +$bind_ssl = Get-Attr $params "ssl" $FALSE; + +# Custom site Parameters from string where properties +# are seperated by a pipe and property name/values by colon. +# Ex. 
"foo:1|bar:2" +$parameters = Get-Attr $params "parameters" $null; +if($parameters -ne $null) { + $parameters = @($parameters -split '\|' | ForEach { + return ,($_ -split "\:", 2); + }) +} + + +# Ensure WebAdministration module is loaded +if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) { + Import-Module WebAdministration +} + +# Result +$result = New-Object psobject @{ + site = New-Object psobject + changed = $false +}; + +# Site info +$site = Get-Website -Name $name + +Try { + # Add site + If(($state -ne 'absent') -and (-not $site)) { + If ($physical_path -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required arguments: physical_path" + } + ElseIf (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $site_parameters = New-Object psobject @{ + Name = $name + PhysicalPath = $physical_path + }; + + If ($application_pool) { + $site_parameters.ApplicationPool = $application_pool + } + + If ($bind_port) { + $site_parameters.Port = $bind_port + } + + If ($bind_ip) { + $site_parameters.IPAddress = $bind_ip + } + + If ($bind_hostname) { + $site_parameters.HostHeader = $bind_hostname + } + + $site = New-Website @site_parameters -Force + $result.changed = $true + } + + # Remove site + If ($state -eq 'absent' -and $site) { + $site = Remove-Website -Name $name + $result.changed = $true + } + + $site = Get-Website -Name $name + If($site) { + # Change Physical Path if needed + if($physical_path) { + If (-not (Test-Path $physical_path)) { + Fail-Json (New-Object psobject) "specified folder must already exist: physical_path" + } + + $folder = Get-Item $physical_path + If($folder.FullName -ne $site.PhysicalPath) { + Set-ItemProperty "IIS:\Sites\$($site.Name)" -name physicalPath -value $folder.FullName + $result.changed = $true + } + } + + # Change Application Pool if needed + if($application_pool) { + If($application_pool -ne $site.applicationPool) { + Set-ItemProperty "IIS:\Sites\$($site.Name)" -name applicationPool -value $application_pool + $result.changed = $true + } + } + + # Set properties + if($parameters) { + $parameters | foreach { + $parameter_value = Get-ItemProperty "IIS:\Sites\$($site.Name)" $_[0] + if((-not $parameter_value) -or ($parameter_value.Value -as [String]) -ne $_[1]) { + Set-ItemProperty "IIS:\Sites\$($site.Name)" $_[0] $_[1] + $result.changed = $true + } + } + } + + # Set run state + if (($state -eq 'stopped') -and ($site.State -eq 'Started')) + { + Stop-Website -Name $name -ErrorAction Stop + $result.changed = $true + } + if ((($state -eq 'started') -and ($site.State -eq 'Stopped')) -or ($state -eq 'restarted')) + { + Start-Website -Name $name -ErrorAction Stop + $result.changed = $true + } + } +} +Catch +{ + Fail-Json (New-Object psobject) $_.Exception.Message +} + +$site = Get-Website -Name $name +$result.site = New-Object psobject @{ + Name = $site.Name + ID = $site.ID + State = $site.State + PhysicalPath = $site.PhysicalPath + ApplicationPool = $site.applicationPool + Bindings = @($site.Bindings.Collection | ForEach-Object { $_.BindingInformation }) +} + + +Exit-Json $result diff --git a/windows/win_iis_website.py b/windows/win_iis_website.py new file mode 100644 index 00000000000..0893b11c2bd --- /dev/null +++ b/windows/win_iis_website.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Henrik Wallström +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of 
the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: win_iis_website +version_added: "1.9" +short_description: Configures a IIS Web site. +description: + - Creates, Removes and configures a IIS Web site +options: + name: + description: + - Names of web site + required: true + default: null + aliases: [] + state: + description: + - State of the web site + choices: + - started + - restarted + - stopped + - absent + required: false + default: null + aliases: [] + physical_path: + description: + - The physical path on the remote host to use for the new site. The specified folder must already exist. + required: false + default: null + aliases: [] + application_pool: + description: + - The application pool in which the new site executes. + required: false + default: null + aliases: [] + port: + description: + - The port to bind to / use for the new site. + required: false + default: null + aliases: [] + ip: + description: + - The IP address to bind to / use for the new site. + required: false + default: null + aliases: [] + hostname: + description: + - The host header to bind to / use for the new site. + required: false + default: null + aliases: [] + ssl: + description: + - Enables HTTPS binding on the site.. + required: false + default: null + aliases: [] + parameters: + description: + - Custom site Parameters from string where properties are seperated by a pipe and property name/values by colon Ex. "foo:1|bar:2" + required: false + default: null + aliases: [] +author: Henrik Wallström +''' + +EXAMPLES = ''' +# This return information about an existing host +$ ansible -i vagrant-inventory -m win_iis_website -a "name='Default Web Site'" window +host | success >> { + "changed": false, + "site": { + "ApplicationPool": "DefaultAppPool", + "Bindings": [ + "*:80:" + ], + "ID": 1, + "Name": "Default Web Site", + "PhysicalPath": "%SystemDrive%\\inetpub\\wwwroot", + "State": "Stopped" + } +} + +# This stops an existing site. +$ ansible -i hosts -m win_iis_website -a "name='Default Web Site' state=stopped" host + +# This creates a new site. +$ ansible -i hosts -m win_iis_website -a "name=acme physical_path=c:\\sites\\acme" host + +# Change logfile . 
+$ ansible -i hosts -m win_iis_website -a "name=acme physical_path=c:\\sites\\acme" host + + +# Playbook example +--- + +- name: Acme IIS site + win_iis_website: + name: "Acme" + state: started + port: 80 + ip: 127.0.0.1 + hostname: acme.local + application_pool: "acme" + physical_path: 'c:\\sites\\acme' + parameters: 'logfile.directory:c:\\sites\\logs' + register: website + +''' From 3900643352b1da677847f00eb4e0b9f9d5cee9b6 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Sun, 26 Apr 2015 22:27:53 +0100 Subject: [PATCH 156/720] documentation tweaks to fix missing arguments and specification of defaults --- clustering/consul | 99 ++++++++++++++++++++++++++------------- clustering/consul_acl | 13 ++++- clustering/consul_kv | 30 ++++++++---- clustering/consul_session | 38 +++++++++++++++ 4 files changed, 138 insertions(+), 42 deletions(-) diff --git a/clustering/consul b/clustering/consul index 15a68f068a2..fec55726539 100644 --- a/clustering/consul +++ b/clustering/consul @@ -24,19 +24,19 @@ short_description: "Add, modify & delete services within a consul cluster. description: - registers services and checks for an agent with a consul cluster. A service is some process running on the agent node that should be advertised by - consul's discovery mechanism. It may optionally supply a check definition - that will be used to notify the consul cluster of the health of the service. + consul's discovery mechanism. It may optionally supply a check definition, + a periodic service test to notify the consul cluster of service's health. Checks may also be registered per node e.g. disk usage, or cpu usage and notify the health of the entire node to the cluster. Service level checks do not require a check name or id as these are derived by Consul from the Service name and id respectively by appending 'service:'. - Node level checks require a check_name and optionally a check_id Currently, - there is no complete way to retrieve the script, interval or ttl metadata for - a registered check. Without this metadata it is not possible to tell if - the data supplied with ansible represents a change to a check. As a result - this does not attempt to determine changes and will always report a changed - occurred. An api method is planned to supply this metadata so at that stage - change management will be added. + Node level checks require a check_name and optionally a check_id. + Currently, there is no complete way to retrieve the script, interval or ttl + metadata for a registered check. Without this metadata it is not possible to + tell if the data supplied with ansible represents a change to a check. As a + result this does not attempt to determine changes and will always report a + changed occurred. An api method is planned to supply this metadata so at that + stage change management will be added. version_added: "1.9" author: Steve Gargan (steve.gargan@gmail.com) options: @@ -45,71 +45,105 @@ options: - register or deregister the consul service, defaults to present required: true choices: ['present', 'absent'] + service_name: + desciption: + - Unique name for the service on a node, must be unique per node, + required if registering a service. 
May be ommitted if registering + a node level check + required: false service_id: description: - the ID for the service, must be unique per node, defaults to the - service name + service name if the service name is supplied required: false + default: service_name if supplied host: description: - - host of the consul agent with which to register the service, - defaults to localhost + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + description: + - the port on which the consul agent is running required: false + default: 8500 notes: description: - Notes to attach to check when registering it. - service_name: - desciption: - - Unique name for the service on a node, must be unique per node, - required if registering a service. May be ommitted if registering - a node level check required: false + default: None service_port: description: - the port on which the service is listening required for - registration of a service. - required: true + registration of a service, i.e. if service_name or service_id is set + required: false tags: description: - a list of tags that will be attached to the service registration. required: false + default: None script: description: - the script/command that will be run periodically to check the health - of the service + of the service. Scripts require an interval and vise versa required: false + default: None interval: description: - - the interval at which the service check will be run. This is by - convention a number with a s or m to signify the units of seconds - or minutes. if none is supplied, m will be appended + - the interval at which the service check will be run. This is a number + with a s or m suffix to signify the units of seconds or minutes e.g + 15s or 1m. If no suffix is supplied, m will be used by default e.g. + 1 will be 1m. Required if the script param is specified. + required: false + default: None check_id: description: - an ID for the service check, defaults to the check name, ignored if - part of service definition. + part of a service definition. + required: false + default: None check_name: description: - a name for the service check, defaults to the check id. required if standalone, ignored if part of service definition. + required: false + default: None + ttl: + description: + - checks can be registered with a ttl instead of a script and interval + this means that the service will check in with the agent before the + ttl expires. If it doesn't the check will be considered failed. + Required if registering a check and the script an interval are missing + Similar to the interval this is a number with a s or m suffix to + signify the units of seconds or minutes e.g 15s or 1m. If no suffix + is supplied, m will be used by default e.g. 1 will be 1m + required: false + default: None + token: + description: + - the token key indentifying an ACL rule set. May be required to + register services. 
+ required: false + default: None """ EXAMPLES = ''' - name: register nginx service with the local consul agent consul: name: nginx - port: 80 + service_port: 80 - name: register nginx service with curl check consul: name: nginx - port: 80 + service_port: 80 script: "curl http://localhost" interval: 60s - name: register nginx with some service tags consul: name: nginx - port: 80 + service_port: 80 tags: - prod - webservers @@ -432,23 +466,22 @@ class ConsulCheck(): def main(): module = AnsibleModule( argument_spec=dict( + host=dict(default='localhost'), + port=dict(default=8500, type='int'), check_id=dict(required=False), check_name=dict(required=False), - host=dict(default='localhost'), - interval=dict(required=False, type='str'), - ttl=dict(required=False, type='str'), check_node=dict(required=False), check_host=dict(required=False), notes=dict(required=False), - port=dict(default=8500, type='int'), script=dict(required=False), service_id=dict(required=False), service_name=dict(required=False), service_port=dict(required=False, type='int'), state=dict(default='present', choices=['present', 'absent']), + interval=dict(required=False, type='str'), + ttl=dict(required=False, type='str'), tags=dict(required=False, type='list'), - token=dict(required=False), - url=dict(default='http://localhost:8500') + token=dict(required=False) ), supports_check_mode=False, ) diff --git a/clustering/consul_acl b/clustering/consul_acl index cd5466c53b1..5e50c54431e 100644 --- a/clustering/consul_acl +++ b/clustering/consul_acl @@ -22,7 +22,8 @@ module: consul_acl short_description: "manipulate consul acl keys and rules" description: - allows the addition, modification and deletion of ACL keys and associated - rules in a consul cluster via the agent. + rules in a consul cluster via the agent. For more details on using and + configuring ACLs, see https://www.consul.io/docs/internals/acl.html version_added: "1.9" author: Steve Gargan (steve.gargan@gmail.com) options: @@ -53,6 +54,16 @@ options: description: - an list of the rules that should be associated with a given key/token. required: false + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + description: + - the port on which the consul agent is running + required: false + default: 8500 """ EXAMPLES = ''' diff --git a/clustering/consul_kv b/clustering/consul_kv index 8999a43319f..a9132a3d1c2 100644 --- a/clustering/consul_kv +++ b/clustering/consul_kv @@ -42,8 +42,9 @@ options: 'release' respectively. a valid session must be supplied to make the attempt changed will be true if the attempt is successful, false otherwise. - required: true + required: false choices: ['present', 'absent', 'acquire', 'release'] + default: present key: description: - the key at which the value should be stored. @@ -57,30 +58,43 @@ options: description: - if the key represents a prefix, each entry with the prefix can be retrieved by setting this to true. - required: true + required: false + default: false session: description: - the session that should be used to acquire or release a lock associated with a key/value pair + required: false + default: None token: description: - the token key indentifying an ACL rule set that controls access to the key value pair required: false - url: - description: - - location of the consul agent with which access the keay/value store, - defaults to http://localhost:8500 - required: false + default: None cas: description: - used when acquiring a lock with a session. 
If the cas is 0, then Consul will only put the key if it does not already exist. If the cas value is non-zero, then the key is only set if the index matches the ModifyIndex of that key. + required: false + default: None flags: description: - opaque integer value that can be passed when setting a value. + required: false + default: None + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + description: + - the port on which the consul agent is running + required: false + default: 8500 """ @@ -214,8 +228,8 @@ def main(): argument_spec = dict( cas=dict(required=False), flags=dict(required=False), - host=dict(default='localhost'), key=dict(required=True), + host=dict(default='localhost'), port=dict(default=8500, type='int'), recurse=dict(required=False, type='bool'), retrieve=dict(required=False, default=True), diff --git a/clustering/consul_session b/clustering/consul_session index 00f4cae7344..7088dc275ba 100644 --- a/clustering/consul_session +++ b/clustering/consul_session @@ -39,35 +39,73 @@ options: node name or session id is required as parameter. required: false choices: ['present', 'absent', 'info', 'node', 'list'] + default: present name: description: - the name that should be associated with the session. This is opaque to Consul and not required. required: false + default: None delay: description: - the optional lock delay that can be attached to the session when it is created. Locks for invalidated sessions ar blocked from being acquired until this delay has expired. default: 15s + required: false node: description: - the name of the node that with which the session will be associated. by default this is the name of the agent. + required: false + default: None datacenter: description: - name of the datacenter in which the session exists or should be created. + required: false + default: None checks: description: - a list of checks that will be used to verify the session health. If all the checks fail, the session will be invalidated and any locks associated with the session will be release and can be acquired once the associated lock delay has expired. + required: false + default: None + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + description: + - the port on which the consul agent is running + required: false + default: 8500 """ EXAMPLES = ''' +- name: register basic session with consul + consul_session: + name: session1 + +- name: register a session with an existing check + consul_session: + name: session_with_check + checks: + - existing_check_name + +- name: register a session with lock_delay + consul_session: + name: session_with_delay + delay: 20 + +- name: retrieve info about session by id + consul_session: id=session_id state=info +- name: retrieve active sessions + consul_session: state=list ''' import sys From ef019e61631396637d7fd61668d97a213c99823c Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 27 Apr 2015 00:05:41 +0200 Subject: [PATCH 157/720] cloudstack: fix missing zone param used in get_vm() in utils --- cloud/cloudstack/cs_vmsnapshot.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index d53a33ac72e..7d9b47b56d8 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -41,6 +41,11 @@ options: - Snapshot memory if set to true. 
required: false default: false + zone: + description: + - Name of the zone in which the VM is in. If not set, default zone is used. + required: false + default: null project: description: - Name of the project the VM is assigned to. @@ -241,6 +246,7 @@ def main(): vm = dict(required=True), description = dict(default=None), project = dict(default=None), + zone = dict(default=None), snapshot_memory = dict(choices=BOOLEANS, default=False), state = dict(choices=['present', 'absent', 'revert'], default='present'), poll_async = dict(choices=BOOLEANS, default=True), From b0fb6b08a98665115ca4634d3351d67653cda4c4 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 27 Apr 2015 00:06:42 +0200 Subject: [PATCH 158/720] cloudstack: fix misssing doc about vm param --- cloud/cloudstack/cs_vmsnapshot.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index 7d9b47b56d8..89c0ec081d6 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -31,6 +31,10 @@ options: - Unique Name of the snapshot. In CloudStack terms C(displayname). required: true aliases: ['displayname'] + vm: + description: + - Name of the virtual machine. + required: true description: description: - Description of the snapshot. From 3a6f57cbc0d1f307e87cdb8cf256452bb30d6cf6 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Mon, 27 Apr 2015 13:40:21 +0100 Subject: [PATCH 159/720] use module.fail_json to report import errors. document valid duration units --- clustering/consul | 20 ++++++++++++++------ clustering/consul_acl | 31 +++++++++++++++++++++--------- clustering/consul_kv | 19 ++++++++++++++----- clustering/consul_session | 40 +++++++++++++++++++++++++++------------ 4 files changed, 78 insertions(+), 32 deletions(-) diff --git a/clustering/consul b/clustering/consul index fec55726539..5db79e20c40 100644 --- a/clustering/consul +++ b/clustering/consul @@ -37,6 +37,9 @@ description: result this does not attempt to determine changes and will always report a changed occurred. An api method is planned to supply this metadata so at that stage change management will be added. +requirements: + - python-consul + - requests version_added: "1.9" author: Steve Gargan (steve.gargan@gmail.com) options: @@ -172,13 +175,11 @@ except ImportError: try: import consul + from requests.exceptions import ConnectionError + python_consul_installed = True except ImportError, e: - print "failed=True msg='python-consul required for this module. "\ - "see http://python-consul.readthedocs.org/en/latest/#installation'" - sys.exit(1) - -from requests.exceptions import ConnectionError - + python_consul_installed = False + def register_with_consul(module): state = module.params.get('state') @@ -462,6 +463,10 @@ class ConsulCheck(): except: pass +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. 
"\ + "see http://python-consul.readthedocs.org/en/latest/#installation") def main(): module = AnsibleModule( @@ -485,6 +490,9 @@ def main(): ), supports_check_mode=False, ) + + test_dependencies(module) + try: register_with_consul(module) except ConnectionError, e: diff --git a/clustering/consul_acl b/clustering/consul_acl index 5e50c54431e..c481b780a64 100644 --- a/clustering/consul_acl +++ b/clustering/consul_acl @@ -23,7 +23,11 @@ short_description: "manipulate consul acl keys and rules" description: - allows the addition, modification and deletion of ACL keys and associated rules in a consul cluster via the agent. For more details on using and - configuring ACLs, see https://www.consul.io/docs/internals/acl.html + configuring ACLs, see https://www.consul.io/docs/internals/acl.html. +requirements: + - python-consul + - pyhcl + - requests version_added: "1.9" author: Steve Gargan (steve.gargan@gmail.com) options: @@ -91,17 +95,16 @@ import urllib2 try: import consul + from requests.exceptions import ConnectionError + python_consul_installed = True except ImportError, e: - print "failed=True msg='python-consul required for this module. "\ - "see http://python-consul.readthedocs.org/en/latest/#installation'" - sys.exit(1) + python_consul_installed = False try: import hcl + pyhcl_installed = True except ImportError: - print "failed=True msg='pyhcl required for this module."\ - " see https://pypi.python.org/pypi/pyhcl'" - sys.exit(1) + pyhcl_installed = False from requests.exceptions import ConnectionError @@ -271,6 +274,7 @@ class Rule: def __str__(self): return '%s %s' % (self.key, self.policy) + def get_consul_api(module, token=None): if not token: token = token = module.params.get('token') @@ -278,6 +282,14 @@ def get_consul_api(module, token=None): port=module.params.get('port'), token=token) +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. "\ + "see http://python-consul.readthedocs.org/en/latest/#installation") + + if not pyhcl_installed: + module.fail_json( msg="pyhcl required for this module."\ + " see https://pypi.python.org/pypi/pyhcl") def main(): argument_spec = dict( @@ -291,9 +303,10 @@ def main(): token_type=dict( required=False, choices=['client', 'management'], default='client') ) + module = AnsibleModule(argument_spec, supports_check_mode=False) - module = AnsibleModule(argument_spec, supports_check_mode=True) - + test_dependencies(module) + try: execute(module) except ConnectionError, e: diff --git a/clustering/consul_kv b/clustering/consul_kv index a9132a3d1c2..e5a010a8c18 100644 --- a/clustering/consul_kv +++ b/clustering/consul_kv @@ -27,6 +27,9 @@ description: the indices, flags and session are returned as 'value'. If the key represents a prefix then Note that when a value is removed, the existing value if any is returned as part of the results. +requirements: + - python-consul + - requests version_added: "1.9" author: Steve Gargan (steve.gargan@gmail.com) options: @@ -126,10 +129,10 @@ except ImportError: try: import consul + from requests.exceptions import ConnectionError + python_consul_installed = True except ImportError, e: - print """failed=True msg='python-consul required for this module. 
\ - see http://python-consul.readthedocs.org/en/latest/#installation'""" - sys.exit(1) + python_consul_installed = False from requests.exceptions import ConnectionError @@ -222,7 +225,11 @@ def get_consul_api(module, token=None): port=module.params.get('port'), token=module.params.get('token')) - +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. "\ + "see http://python-consul.readthedocs.org/en/latest/#installation") + def main(): argument_spec = dict( @@ -238,8 +245,10 @@ def main(): value=dict(required=False) ) - module = AnsibleModule(argument_spec, supports_check_mode=True) + module = AnsibleModule(argument_spec, supports_check_mode=False) + test_dependencies(module) + try: execute(module) except ConnectionError, e: diff --git a/clustering/consul_session b/clustering/consul_session index 7088dc275ba..8e6516891d2 100644 --- a/clustering/consul_session +++ b/clustering/consul_session @@ -25,6 +25,9 @@ description: cluster. These sessions can then be used in conjunction with key value pairs to implement distributed locks. In depth documentation for working with sessions can be found here http://www.consul.io/docs/internals/sessions.html +requirements: + - python-consul + - requests version_added: "1.9" author: Steve Gargan (steve.gargan@gmail.com) options: @@ -50,7 +53,8 @@ options: description: - the optional lock delay that can be attached to the session when it is created. Locks for invalidated sessions ar blocked from being - acquired until this delay has expired. + acquired until this delay has expired. Valid units for delays + include 'ns', 'us', 'ms', 's', 'm', 'h' default: 15s required: false node: @@ -99,7 +103,7 @@ EXAMPLES = ''' - name: register a session with lock_delay consul_session: name: session_with_delay - delay: 20 + delay: 20s - name: retrieve info about session by id consul_session: id=session_id state=info @@ -113,12 +117,10 @@ import urllib2 try: import consul + from requests.exceptions import ConnectionError + python_consul_installed = True except ImportError, e: - print "failed=True msg='python-consul required for this module. see "\ - "http://python-consul.readthedocs.org/en/latest/#installation'" - sys.exit(1) - -from requests.errors import ConnectionError + python_consul_installed = False def execute(module): @@ -182,11 +184,11 @@ def update_session(module): changed = True try: - + session = consul.session.create( name=name, node=node, - lock_delay=delay, + lock_delay=validate_duration('delay', delay), dc=datacenter, checks=checks ) @@ -219,15 +221,27 @@ def remove_session(module): module.fail_json(msg="Could not remove session with id '%s' %s" % ( session_id, e)) +def validate_duration(name, duration): + if duration: + duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] + if not any((duration.endswith(suffix) for suffix in duration_units)): + raise Exception('Invalid %s %s you must specify units (%s)' % + (name, duration, ', '.join(duration_units))) + return duration def get_consul_api(module): return consul.Consul(host=module.params.get('host'), port=module.params.get('port')) + +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. 
"\ + "see http://python-consul.readthedocs.org/en/latest/#installation") def main(): argument_spec = dict( checks=dict(default=None, required=False, type='list'), - delay=dict(required=False,type='int', default=15), + delay=dict(required=False,type='str', default='15s'), host=dict(default='localhost'), port=dict(default=8500, type='int'), id=dict(required=False), @@ -237,8 +251,10 @@ def main(): choices=['present', 'absent', 'info', 'node', 'list']) ) - module = AnsibleModule(argument_spec, supports_check_mode=True) - + module = AnsibleModule(argument_spec, supports_check_mode=False) + + test_dependencies(module) + try: execute(module) except ConnectionError, e: From 764a0e26b6df02cf2924254589a065918b6ca5d6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Apr 2015 11:12:34 -0700 Subject: [PATCH 160/720] doc formatting --- cloud/cloudstack/cs_securitygroup_rule.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index a170230acac..709a9b562b3 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -53,28 +53,28 @@ options: - CIDR (full notation) to be used for security group rule. required: false default: '0.0.0.0/0' - user_security_group + user_security_group: description: - Security group this rule is based of. required: false default: null - start_port + start_port: description: - Start port for this rule. Required if C(protocol=tcp) or C(protocol=udp). required: false default: null aliases: [ 'port' ] - end_port + end_port: description: - End port for this rule. Required if C(protocol=tcp) or C(protocol=udp), but C(start_port) will be used if not set. required: false default: null - icmp_type + icmp_type: description: - Type of the icmp message being sent. Required if C(protocol=icmp). required: false default: null - icmp_code + icmp_code: description: - Error code for this icmp message. Required if C(protocol=icmp). 
required: false From f7961bd227f6edee3d940c55db420f3fa35af26e Mon Sep 17 00:00:00 2001 From: NewGyu Date: Wed, 29 Apr 2015 23:59:16 +0900 Subject: [PATCH 161/720] fix cannot download SNAPSHOT version --- packaging/language/maven_artifact.py | 25 ++++--------------------- 1 file changed, 4 insertions(+), 21 deletions(-) diff --git a/packaging/language/maven_artifact.py b/packaging/language/maven_artifact.py index 2aeb158625b..e0859dbf938 100644 --- a/packaging/language/maven_artifact.py +++ b/packaging/language/maven_artifact.py @@ -184,29 +184,12 @@ class MavenDownloader: if artifact.is_snapshot(): path = "/%s/maven-metadata.xml" % (artifact.path()) xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r)) - basexpath = "/metadata/versioning/" - p = xml.xpath(basexpath + "/snapshotVersions/snapshotVersion") - if p: - return self._find_matching_artifact(p, artifact) + timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0] + buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0] + return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber)) else: return self._uri_for_artifact(artifact) - def _find_matching_artifact(self, elems, artifact): - filtered = filter(lambda e: e.xpath("extension/text() = '%s'" % artifact.extension), elems) - if artifact.classifier: - filtered = filter(lambda e: e.xpath("classifier/text() = '%s'" % artifact.classifier), elems) - - if len(filtered) > 1: - print( - "There was more than one match. Selecting the first one. Try adding a classifier to get a better match.") - elif not len(filtered): - print("There were no matches.") - return None - - elem = filtered[0] - value = elem.xpath("value/text()") - return self._uri_for_artifact(artifact, value[0]) - def _uri_for_artifact(self, artifact, version=None): if artifact.is_snapshot() and not version: raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact)) @@ -309,7 +292,7 @@ def main(): repository_url = dict(default=None), username = dict(default=None), password = dict(default=None), - state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state + state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state dest = dict(default=None), ) ) From 650b6e9b4ff890b5664858c901668c62bac3f6f9 Mon Sep 17 00:00:00 2001 From: Nick Harring Date: Wed, 29 Apr 2015 14:26:16 -0700 Subject: [PATCH 162/720] Initial public revision. --- monitoring/circonus_annotation.py | 125 ++++++++++++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 monitoring/circonus_annotation.py diff --git a/monitoring/circonus_annotation.py b/monitoring/circonus_annotation.py new file mode 100644 index 00000000000..6324a1b6111 --- /dev/null +++ b/monitoring/circonus_annotation.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python + +import requests +import time +import json + +DOCUMENTATION = ''' +--- +module: circonus_annotation +short_description: create an annotation in circonus +description: + - Create an annotation event with a given category, title and description. 
Optionally start, end or durations can be provided +author: Nick Harring +requirements: + - urllib3 + - requests + - time +options: + api_key: + description: + - Circonus API key + required: true + category: + description: + - Annotation Category + required: true + description: + description: + - Description of annotation + required: true + title: + description: + - Title of annotation + required: true + start: + description: + - Unix timestamp of event start, defaults to now + required: false + stop: + description: + - Unix timestamp of event end, defaults to now + duration + required: false + duration: + description: + - Duration in seconds of annotation, defaults to 0 + required: false +''' +EXAMPLES = ''' +# Create a simple annotation event with a source +- circonus_annotation: + api_key: XXXXXXXXXXXXXXXXX + title: 'App Config Change' + description: 'This is a detailed description of the config change' + category: 'This category groups like annotations' +# Create an annotation with a duration of 5 minutes +- circonus_annotation: + api_key: XXXXXXXXXXXXXXXXX + title: 'App Config Change' + description: 'This is a detailed description of the config change' + category: 'This category groups like annotations' + duration: 300 +# Create an annotation with a start_time and end_time +- circonus_annotation: + api_key: XXXXXXXXXXXXXXXXX + title: 'App Config Change' + description: 'This is a detailed description of the config change' + category: 'This category groups like annotations' + start_time: 1395940006 + end_time: 1395954407 +''' +def post_annotation(annotation, api_key): + base_url = 'https://api.circonus.com/v2' + anootate_post_endpoint = '/annotation' + resp = requests.post(base_url + anootate_post_endpoint, headers=build_headers(api_key), data=json.dumps(annotation)) + resp.raise_for_status() + return resp + +def create_annotation(module): + annotation = {} + if module.params['duration'] != None: + duration = module.params['duration'] + else: + duration = 0 + if module.params['start'] != None: + start = module.params['start'] + else: + start = int(time.time()) + if module.params['stop'] != None: + stop = module.params['stop'] + else: + stop = int(time.time())+duration + annotation['start'] = int(start) + annotation['stop'] = int(stop) + annotation['category'] = module.params['category'] + annotation['description'] = module.params['description'] + annotation['title'] = module.params['title'] + return annotation + +def build_headers(api_token): + headers = {'X-Circonus-App-Name': 'ansible', + 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token, + 'Accept': 'application/json'} + return headers + +def main(): + module = AnsibleModule( + argument_spec = dict( + start = dict(required=False, type='int'), + stop = dict(required=False, type='int'), + category = dict(required=True), + title = dict(required=True), + description = dict(required=True), + duration = dict(required=False, type='int'), + api_key = dict(required=True) + ) + ) + annotation = create_annotation(module) + try: + resp = post_annotation(annotation, module.params['api_key']) + except requests.exceptions.RequestException as e: + module.fail_json(msg='Request Failed', reason=e) + module.exit_json(changed=True, annotation=resp.json()) + +from ansible.module_utils.basic import * +main() From 4ffb5f065ba66c36d06c1b7da5767e1cb5679bdc Mon Sep 17 00:00:00 2001 From: Robert Osowiecki Date: Thu, 30 Apr 2015 17:50:07 +0200 Subject: [PATCH 163/720] Using get_bin_path to find rmmod and modprobe --- system/modprobe.py | 4 ++-- 1 
file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/modprobe.py b/system/modprobe.py index 50c8f72fb2a..af845ae8cf5 100644 --- a/system/modprobe.py +++ b/system/modprobe.py @@ -97,13 +97,13 @@ def main(): # Add/remove module as needed if args['state'] == 'present': if not present: - rc, _, err = module.run_command(['modprobe', args['name'], args['params']]) + rc, _, err = module.run_command([module.get_bin_path('modprobe', True), args['name'], args['params']]) if rc != 0: module.fail_json(msg=err, **args) args['changed'] = True elif args['state'] == 'absent': if present: - rc, _, err = module.run_command(['rmmod', args['name']]) + rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']]) if rc != 0: module.fail_json(msg=err, **args) args['changed'] = True From dd02f24df9cf56a52320314488fa2994e96291ae Mon Sep 17 00:00:00 2001 From: Nick Harring Date: Thu, 30 Apr 2015 10:16:26 -0700 Subject: [PATCH 164/720] Added Epic copyright notice, clarified examples. --- monitoring/circonus_annotation.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/monitoring/circonus_annotation.py b/monitoring/circonus_annotation.py index 6324a1b6111..dd95402a7c1 100644 --- a/monitoring/circonus_annotation.py +++ b/monitoring/circonus_annotation.py @@ -1,5 +1,7 @@ #!/usr/bin/env python +# (c) 2014-2015, Epic Games, Inc. + import requests import time import json @@ -46,13 +48,13 @@ options: required: false ''' EXAMPLES = ''' -# Create a simple annotation event with a source +# Create a simple annotation event with a source, defaults to start and end time of now - circonus_annotation: api_key: XXXXXXXXXXXXXXXXX title: 'App Config Change' description: 'This is a detailed description of the config change' category: 'This category groups like annotations' -# Create an annotation with a duration of 5 minutes +# Create an annotation with a duration of 5 minutes and a default start time of now - circonus_annotation: api_key: XXXXXXXXXXXXXXXXX title: 'App Config Change' @@ -88,7 +90,7 @@ def create_annotation(module): if module.params['stop'] != None: stop = module.params['stop'] else: - stop = int(time.time())+duration + stop = int(time.time())+ duration annotation['start'] = int(start) annotation['stop'] = int(stop) annotation['category'] = module.params['category'] From 69e27f40396736e444f50c9c0339da25db465285 Mon Sep 17 00:00:00 2001 From: Roland Ramthun Date: Fri, 1 May 2015 10:01:56 +0200 Subject: [PATCH 165/720] disable ask on sync action --- packaging/os/portage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index ab96cb22e60..eb77baa14f6 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -231,7 +231,7 @@ def sync_repositories(module, webrsync=False): webrsync_path = module.get_bin_path('emerge-webrsync', required=True) cmd = '%s --quiet' % webrsync_path else: - cmd = '%s --sync --quiet' % module.emerge_path + cmd = '%s --sync --quiet --ask=n' % module.emerge_path rc, out, err = module.run_command(cmd) if rc != 0: From 0b18bdc57fbc119406522a78042c46a5170ce5b7 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 1 May 2015 17:32:29 +0200 Subject: [PATCH 166/720] cloudstack: add new module cs_instance Manages instances and virtual machines --- cloud/cloudstack/cs_instance.py | 787 ++++++++++++++++++++++++++++++++ 1 file changed, 787 insertions(+) create mode 100644 cloud/cloudstack/cs_instance.py diff --git a/cloud/cloudstack/cs_instance.py 
b/cloud/cloudstack/cs_instance.py new file mode 100644 index 00000000000..62856c6d177 --- /dev/null +++ b/cloud/cloudstack/cs_instance.py @@ -0,0 +1,787 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_instance +short_description: Manages instances and virtual machines on Apache CloudStack based clouds. +description: + - Deploy, start, restart, stop and destroy instances on Apache CloudStack, Citrix CloudPlatform and Exoscale. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Host name of the instance. C(name) can only contain ASCII letters. + required: true + display_name: + description: + - Custom display name of the instances. + required: false + default: null + group: + description: + - Group in where the new instance should be in. + required: false + default: null + state: + description: + - State of the instance. + required: false + default: 'present' + choices: [ 'deployed', 'started', 'stopped', 'restarted', 'destroyed', 'expunged', 'present', 'absent' ] + service_offering: + description: + - Name or id of the service offering of the new instance. If not set, first found service offering is used. + required: false + default: null + template: + description: + - Name or id of the template to be used for creating the new instance. Required when using C(state=present). Mutually exclusive with C(ISO) option. + required: false + default: null + iso: + description: + - Name or id of the ISO to be used for creating the new instance. Required when using C(state=present). Mutually exclusive with C(template) option. + required: false + default: null + hypervisor: + description: + - Name the hypervisor to be used for creating the new instance. Relevant when using C(state=present) and option C(ISO) is used. If not set, first found hypervisor will be used. + required: false + default: null + choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] + keyboard: + description: + - Keyboard device type for the instance. + required: false + default: null + choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ] + networks: + description: + - List of networks to use for the new instance. + required: false + default: [] + aliases: [ 'network' ] + ip_address: + description: + - IPv4 address for default instance's network during creation + required: false + default: null + ip6_address: + description: + - IPv6 address for default instance's network. + required: false + default: null + disk_offering: + description: + - Name of the disk offering to be used. + required: false + default: null + disk_size: + description: + - Disk size in GByte required if deploying instance from ISO. + required: false + default: null + security_groups: + description: + - List of security groups the instance to be applied to. 
+ required: false + default: [] + aliases: [ 'security_group' ] + project: + description: + - Name of the project the instance to be deployed in. + required: false + default: null + zone: + description: + - Name of the zone in which the instance shoud be deployed. If not set, default zone is used. + required: false + default: null + ssh_key: + description: + - Name of the SSH key to be deployed on the new instance. + required: false + default: null + affinity_groups: + description: + - Affinity groups names to be applied to the new instance. + required: false + default: [] + aliases: [ 'affinity_group' ] + user_data: + description: + - Optional data (ASCII) that can be sent to the instance upon a successful deployment. + - The data will be automatically base64 encoded. + - Consider switching to HTTP_POST by using C(CLOUDSTACK_METHOD=post) to increase the HTTP_GET size limit of 2KB to 32 KB. + required: false + default: null + force: + description: + - Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed. + required: false + default: true + tags: + description: + - List of tags. Tags are a list of dictionaries having keys C(key) and C(value). + - If you want to delete all tags, set a empty list e.g. C(tags: []). + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +''' + +EXAMPLES = ''' +--- +# Create a instance on CloudStack from an ISO +# NOTE: Names of offerings and ISOs depending on the CloudStack configuration. +- local_action: + module: cs_instance + name: web-vm-1 + iso: Linux Debian 7 64-bit + hypervisor: VMware + project: Integration + zone: ch-zrh-ix-01 + service_offering: 1cpu_1gb + disk_offering: PerfPlus Storage + disk_size: 20 + networks: + - Server Integration + - Sync Integration + - Storage Integration + + +# For changing a running instance, use the 'force' parameter +- local_action: + module: cs_instance + name: web-vm-1 + display_name: web-vm-01.example.com + iso: Linux Debian 7 64-bit + service_offering: 2cpu_2gb + force: yes + + +# Create or update a instance on Exoscale's public cloud +- local_action: + module: cs_instance + name: web-vm-1 + template: Linux Debian 7 64-bit + service_offering: Tiny + ssh_key: john@example.com + tags: + - { key: admin, value: john } + - { key: foo, value: bar } + register: vm + +- debug: msg='default ip {{ vm.default_ip }} and is in state {{ vm.state }}' + + +# Ensure a instance has stopped +- local_action: cs_instance name=web-vm-1 state=stopped + + +# Ensure a instance is running +- local_action: cs_instance name=web-vm-1 state=started + + +# Remove a instance +- local_action: cs_instance name=web-vm-1 state=absent +''' + +RETURN = ''' +--- +id: + description: ID of the instance. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the instance. + returned: success + type: string + sample: web-01 +display_name: + description: Display name of the instance. + returned: success + type: string + sample: web-01 +group: + description: Group name of the instance is related. + returned: success + type: string + sample: web +created: + description: Date of the instance was created. + returned: success + type: string + sample: 2014-12-01T14:57:57+0100 +password_enabled: + description: True if password setting is enabled. + returned: success + type: boolean + sample: true +password: + description: The password of the instance if exists. 
+ returned: success + type: string + sample: Ge2oe7Do +ssh_key: + description: Name of ssh key deployed to instance. + returned: success + type: string + sample: key@work +project: + description: Name of project the instance is related to. + returned: success + type: string + sample: Production +default_ip: + description: Default IP address of the instance. + returned: success + type: string + sample: 10.23.37.42 +public_ip: + description: Public IP address with instance via static nat rule. + returned: success + type: string + sample: 1.2.3.4 +iso: + description: Name of ISO the instance was deployed with. + returned: success + type: string + sample: Debian-8-64bit +template: + description: Name of template the instance was deployed with. + returned: success + type: string + sample: Debian-8-64bit +service_offering: + description: Name of the service offering the instance has. + returned: success + type: string + sample: 2cpu_2gb +zone: + description: Name of zone the instance is in. + returned: success + type: string + sample: ch-gva-2 +state: + description: State of the instance. + returned: success + type: string + sample: Running +security_groups: + description: Security groups the instance is in. + returned: success + type: list + sample: '[ "default" ]' +affinity_groups: + description: Affinity groups the instance is in. + returned: success + type: list + sample: '[ "webservers" ]' +tags: + description: List of resource tags associated with the instance. + returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +''' + +import base64 + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackInstance(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.instance = None + + + def get_service_offering_id(self): + service_offering = self.module.params.get('service_offering') + + service_offerings = self.cs.listServiceOfferings() + if service_offerings: + if not service_offering: + return service_offerings['serviceoffering'][0]['id'] + + for s in service_offerings['serviceoffering']: + if service_offering in [ s['name'], s['id'] ]: + return s['id'] + self.module.fail_json(msg="Service offering '%s' not found" % service_offering) + + + def get_template_or_iso_id(self): + template = self.module.params.get('template') + iso = self.module.params.get('iso') + + if not template and not iso: + self.module.fail_json(msg="Template or ISO is required.") + + if template and iso: + self.module.fail_json(msg="Template are ISO are mutually exclusive.") + + if template: + templates = self.cs.listTemplates(templatefilter='executable') + if templates: + for t in templates['template']: + if template in [ t['displaytext'], t['name'], t['id'] ]: + return t['id'] + self.module.fail_json(msg="Template '%s' not found" % template) + + elif iso: + isos = self.cs.listIsos() + if isos: + for i in isos['iso']: + if iso in [ i['displaytext'], i['name'], i['id'] ]: + return i['id'] + self.module.fail_json(msg="ISO '%s' not found" % iso) + + + def get_disk_offering_id(self): + disk_offering = self.module.params.get('disk_offering') + + if not disk_offering: + return None + + disk_offerings = self.cs.listDiskOfferings() + if disk_offerings: + for d in disk_offerings['diskoffering']: + if disk_offering in [ d['displaytext'], d['name'], d['id'] ]: + return d['id'] + 
self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering) + + + def get_instance(self): + instance = self.instance + if not instance: + instance_name = self.module.params.get('name') + + args = {} + args['projectid'] = self.get_project_id() + args['zoneid'] = self.get_zone_id() + instances = self.cs.listVirtualMachines(**args) + if instances: + for v in instances['virtualmachine']: + if instance_name in [ v['name'], v['displayname'], v['id'] ]: + self.instance = v + break + return self.instance + + + def get_network_ids(self): + network_names = self.module.params.get('networks') + if not network_names: + return None + + args = {} + args['zoneid'] = self.get_zone_id() + args['projectid'] = self.get_project_id() + networks = self.cs.listNetworks(**args) + if not networks: + self.module.fail_json(msg="No networks available") + + network_ids = [] + network_displaytexts = [] + for network_name in network_names: + for n in networks['network']: + if network_name in [ n['displaytext'], n['name'], n['id'] ]: + network_ids.append(n['id']) + network_displaytexts.append(n['name']) + break + + if len(network_ids) != len(network_names): + self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts) + + return ','.join(network_ids) + + + def present_instance(self): + instance = self.get_instance() + if not instance: + instance = self.deploy_instance() + else: + instance = self.update_instance(instance) + + instance = self.ensure_tags(resource=instance, resource_type='UserVm') + + return instance + + + def get_user_data(self): + user_data = self.module.params.get('user_data') + if user_data: + user_data = base64.b64encode(user_data) + return user_data + + + def get_display_name(self): + display_name = self.module.params.get('display_name') + if not display_name: + display_name = self.module.params.get('name') + return display_name + + + def deploy_instance(self): + self.result['changed'] = True + + args = {} + args['templateid'] = self.get_template_or_iso_id() + args['zoneid'] = self.get_zone_id() + args['serviceofferingid'] = self.get_service_offering_id() + args['projectid'] = self.get_project_id() + args['diskofferingid'] = self.get_disk_offering_id() + args['networkids'] = self.get_network_ids() + args['hypervisor'] = self.get_hypervisor() + args['userdata'] = self.get_user_data() + args['keyboard'] = self.module.params.get('keyboard') + args['ipaddress'] = self.module.params.get('ip_address') + args['ip6address'] = self.module.params.get('ip6_address') + args['name'] = self.module.params.get('name') + args['group'] = self.module.params.get('group') + args['keypair'] = self.module.params.get('ssh_key') + args['size'] = self.module.params.get('disk_size') + args['securitygroupnames'] = ','.join(self.module.params.get('security_groups')) + args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups')) + + instance = None + if not self.module.check_mode: + instance = self.cs.deployVirtualMachine(**args) + + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self._poll_job(instance, 'virtualmachine') + return instance + + + def update_instance(self, instance): + args_service_offering = {} + args_service_offering['id'] = instance['id'] + args_service_offering['serviceofferingid'] = self.get_service_offering_id() + + args_instance_update = {} + args_instance_update['id'] = instance['id'] + 
args_instance_update['group'] = self.module.params.get('group') + args_instance_update['displayname'] = self.get_display_name() + args_instance_update['userdata'] = self.get_user_data() + args_instance_update['ostypeid'] = self.get_os_type_id() + + args_ssh_key = {} + args_ssh_key['id'] = instance['id'] + args_ssh_key['keypair'] = self.module.params.get('ssh_key') + args_ssh_key['projectid'] = self.get_project_id() + + if self._has_changed(args_service_offering, instance) or \ + self._has_changed(args_instance_update, instance) or \ + self._has_changed(args_ssh_key, instance): + + force = self.module.params.get('force') + instance_state = instance['state'].lower() + + if instance_state == 'stopped' or force: + self.result['changed'] = True + if not self.module.check_mode: + + # Ensure VM has stopped + instance = self.stop_instance() + instance = self._poll_job(instance, 'virtualmachine') + self.instance = instance + + # Change service offering + if self._has_changed(args_service_offering, instance): + res = self.cs.changeServiceForVirtualMachine(**args_service_offering) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + instance = res['virtualmachine'] + self.instance = instance + + # Update VM + if self._has_changed(args_instance_update, instance): + res = self.cs.updateVirtualMachine(**args_instance_update) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + instance = res['virtualmachine'] + self.instance = instance + + # Reset SSH key + if self._has_changed(args_ssh_key, instance): + instance = self.cs.resetSSHKeyForVirtualMachine(**args_ssh_key) + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) + + instance = self._poll_job(instance, 'virtualmachine') + self.instance = instance + + # Start VM again if it was running before + if instance_state == 'running': + instance = self.start_instance() + return instance + + + def absent_instance(self): + instance = self.get_instance() + if instance: + if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.destroyVirtualMachine(id=instance['id']) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self._poll_job(res, 'virtualmachine') + return instance + + + def expunge_instance(self): + instance = self.get_instance() + if instance: + res = {} + if instance['state'].lower() in [ 'destroying', 'destroyed' ]: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.expungeVirtualMachine(id=instance['id']) + + elif instance['state'].lower() not in [ 'expunging' ]: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True) + + if res and 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self._poll_job(res, 'virtualmachine') + return instance + + + def stop_instance(self): + instance = self.get_instance() + if not instance: + self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name')) + + if instance['state'].lower() in ['stopping', 'stopped']: + return instance + + if instance['state'].lower() in ['starting', 'running']: + self.result['changed'] = True + if not 
self.module.check_mode:
+                instance = self.cs.stopVirtualMachine(id=instance['id'])
+
+                if 'errortext' in instance:
+                    self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    instance = self._poll_job(instance, 'virtualmachine')
+        return instance
+
+
+    def start_instance(self):
+        instance = self.get_instance()
+        if not instance:
+            self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name'))
+
+        if instance['state'].lower() in ['starting', 'running']:
+            return instance
+
+        if instance['state'].lower() in ['stopped', 'stopping']:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                instance = self.cs.startVirtualMachine(id=instance['id'])
+
+                if 'errortext' in instance:
+                    self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    instance = self._poll_job(instance, 'virtualmachine')
+        return instance
+
+
+    def restart_instance(self):
+        instance = self.get_instance()
+        if not instance:
+            self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name'))
+
+        if instance['state'].lower() in [ 'running', 'starting' ]:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                instance = self.cs.rebootVirtualMachine(id=instance['id'])
+
+                if 'errortext' in instance:
+                    self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    instance = self._poll_job(instance, 'virtualmachine')
+
+        elif instance['state'].lower() in [ 'stopping', 'stopped' ]:
+            instance = self.start_instance()
+        return instance
+
+
+    def get_result(self, instance):
+        if instance:
+            if 'id' in instance:
+                self.result['id'] = instance['id']
+            if 'name' in instance:
+                self.result['name'] = instance['name']
+            if 'displayname' in instance:
+                self.result['display_name'] = instance['displayname']
+            if 'group' in instance:
+                self.result['group'] = instance['group']
+            if 'project' in instance:
+                self.result['project'] = instance['project']
+            if 'publicip' in instance:
+                self.result['public_ip'] = instance['publicip']
+            if 'passwordenabled' in instance:
+                self.result['password_enabled'] = instance['passwordenabled']
+            if 'password' in instance:
+                self.result['password'] = instance['password']
+            if 'serviceofferingname' in instance:
+                self.result['service_offering'] = instance['serviceofferingname']
+            if 'zonename' in instance:
+                self.result['zone'] = instance['zonename']
+            if 'templatename' in instance:
+                self.result['template'] = instance['templatename']
+            if 'isoname' in instance:
+                self.result['iso'] = instance['isoname']
+            if 'keypair' in instance:
+                self.result['ssh_key'] = instance['keypair']
+            if 'created' in instance:
+                self.result['created'] = instance['created']
+            if 'state' in instance:
+                self.result['state'] = instance['state']
+            if 'tags' in instance:
+                self.result['tags'] = []
+                for tag in instance['tags']:
+                    result_tag = {}
+                    result_tag['key'] = tag['key']
+                    result_tag['value'] = tag['value']
+                    self.result['tags'].append(result_tag)
+            if 'securitygroup' in instance:
+                security_groups = []
+                for securitygroup in instance['securitygroup']:
+                    security_groups.append(securitygroup['name'])
+                self.result['security_groups'] = security_groups
+            if 'affinitygroup' in instance:
+                affinity_groups = []
+                for affinitygroup in instance['affinitygroup']:
+                    affinity_groups.append(affinitygroup['name'])
+                self.result['affinity_groups'] = affinity_groups
+            if 'nic' in instance:
+                for nic in instance['nic']:
+                    if nic['isdefault']:
+                        self.result['default_ip'] = nic['ipaddress']
+        return self.result
+
+def main():
+    module = AnsibleModule(
+        argument_spec = dict(
+            name = dict(required=True),
+            display_name = dict(default=None),
+            group = dict(default=None),
+            state = dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'absent', 'destroyed', 'expunged'], default='present'),
+            service_offering = dict(default=None),
+            template = dict(default=None),
+            iso = dict(default=None),
+            networks = dict(type='list', aliases=[ 'network' ], default=None),
+            ip_address = dict(default=None),
+            ip6_address = dict(default=None),
+            disk_offering = dict(default=None),
+            disk_size = dict(type='int', default=None),
+            keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None),
+            hypervisor = dict(default=None),
+            security_groups = dict(type='list', aliases=[ 'security_group' ], default=[]),
+            affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]),
+            project = dict(default=None),
+            user_data = dict(default=None),
+            zone = dict(default=None),
+            ssh_key = dict(default=None),
+            force = dict(choices=BOOLEANS, default=False),
+            tags = dict(type='list', aliases=[ 'tag' ], default=None),
+            poll_async = dict(choices=BOOLEANS, default=True),
+            api_key = dict(default=None),
+            api_secret = dict(default=None),
+            api_url = dict(default=None),
+            api_http_method = dict(default='get'),
+        ),
+        supports_check_mode=True
+    )
+
+    if not has_lib_cs:
+        module.fail_json(msg="python library cs required: pip install cs")
+
+    try:
+        acs_instance = AnsibleCloudStackInstance(module)
+
+        state = module.params.get('state')
+
+        if state in ['absent', 'destroyed']:
+            instance = acs_instance.absent_instance()
+
+        elif state in ['expunged']:
+            instance = acs_instance.expunge_instance()
+
+        elif state in ['present', 'deployed']:
+            instance = acs_instance.present_instance()
+
+        elif state in ['stopped']:
+            instance = acs_instance.stop_instance()
+
+        elif state in ['started']:
+            instance = acs_instance.start_instance()
+
+        elif state in ['restarted']:
+            instance = acs_instance.restart_instance()
+
+        if instance and 'state' in instance and instance['state'].lower() == 'error':
+            module.fail_json(msg="Instance named '%s' in error state."
% module.params.get('name')) + + result = acs_instance.get_result(instance) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From a79772deb126e262db82a2ee6f172f39d2b71e95 Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Sun, 3 May 2015 20:58:21 +0100 Subject: [PATCH 167/720] Add webfaction modules --- cloud/webfaction/__init__.py | 0 cloud/webfaction/webfaction_app.py | 153 ++++++++++++++++++++ cloud/webfaction/webfaction_db.py | 147 +++++++++++++++++++ cloud/webfaction/webfaction_domain.py | 134 ++++++++++++++++++ cloud/webfaction/webfaction_mailbox.py | 112 +++++++++++++++ cloud/webfaction/webfaction_site.py | 189 +++++++++++++++++++++++++ 6 files changed, 735 insertions(+) create mode 100644 cloud/webfaction/__init__.py create mode 100644 cloud/webfaction/webfaction_app.py create mode 100644 cloud/webfaction/webfaction_db.py create mode 100644 cloud/webfaction/webfaction_domain.py create mode 100644 cloud/webfaction/webfaction_mailbox.py create mode 100644 cloud/webfaction/webfaction_site.py diff --git a/cloud/webfaction/__init__.py b/cloud/webfaction/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py new file mode 100644 index 00000000000..b1ddcd5a9c0 --- /dev/null +++ b/cloud/webfaction/webfaction_app.py @@ -0,0 +1,153 @@ +#! /usr/bin/python +# Create a Webfaction application using Ansible and the Webfaction API +# +# Valid application types can be found by looking here: +# http://docs.webfaction.com/xmlrpc-api/apps.html#application-types +# +# Quentin Stafford-Fraser 2015 + +DOCUMENTATION = ''' +--- +module: webfaction_app +short_description: Add or remove applications on a Webfaction host +description: + - Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser +version_added: 1.99 +notes: + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. + +options: + name: + description: + - The name of the application + required: true + default: null + + state: + description: + - Whether the application should exist + required: false + choices: ['present', 'absent'] + default: "present" + + type: + description: + - The type of application to create. See the Webfaction docs at http://docs.webfaction.com/xmlrpc-api/apps.html for a list. 
+ required: true + + autostart: + description: + - Whether the app should restart with an autostart.cgi script + required: false + default: "no" + + extra_info: + description: + - Any extra parameters required by the app + required: false + default: null + + open_port: + required: false + default: false + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +import xmlrpclib +from ansible.module_utils.basic import * + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + state = dict(required=False, default='present'), + type = dict(required=True), + autostart = dict(required=False, choices=BOOLEANS, default='false'), + extra_info = dict(required=False, default=""), + port_open = dict(required=False, default="false"), + login_name = dict(required=True), + login_password = dict(required=True), + ), + supports_check_mode=True + ) + app_name = module.params['name'] + app_type = module.params['type'] + app_state = module.params['state'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + app_list = webfaction.list_apps(session_id) + app_map = dict([(i['name'], i) for i in app_list]) + existing_app = app_map.get(app_name) + + result = {} + + # Here's where the real stuff happens + + if app_state == 'present': + + # Does an app with this name already exist? + if existing_app: + if existing_app['type'] != app_type: + module.fail_json(msg="App already exists with different type. Please fix by hand.") + + # If it exists with the right type, we don't change it + # Should check other parameters. + module.exit_json( + changed = False, + ) + + if not module.check_mode: + # If this isn't a dry run, create the app + result.update( + webfaction.create_app( + session_id, app_name, app_type, + module.boolean(module.params['autostart']), + module.params['extra_info'], + module.boolean(module.params['port_open']) + ) + ) + + elif app_state == 'absent': + + # If the app's already not there, nothing changed. + if not existing_app: + module.exit_json( + changed = False, + ) + + if not module.check_mode: + # If this isn't a dry run, delete the app + result.update( + webfaction.delete_app(session_id, app_name) + ) + + else: + module.fail_json(msg="Unknown state specified: {}".format(app_state)) + + + module.exit_json( + changed = True, + result = result + ) + +# The conventional ending +main() + diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py new file mode 100644 index 00000000000..7205a084ef2 --- /dev/null +++ b/cloud/webfaction/webfaction_db.py @@ -0,0 +1,147 @@ +#! /usr/bin/python +# Create webfaction database using Ansible and the Webfaction API +# +# Quentin Stafford-Fraser 2015 + +DOCUMENTATION = ''' +--- +module: webfaction_db +short_description: Add or remove a database on Webfaction +description: + - Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser +version_added: 1.99 +notes: + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. 
If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. +options: + + name: + description: + - The name of the database + required: true + default: null + + state: + description: + - Whether the database should exist + required: false + choices: ['present', 'absent'] + default: "present" + + type: + description: + - The type of database to create. + required: true + choices: ['mysql', 'postgresql'] + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +EXAMPLES = ''' + # This will also create a default DB user with the same + # name as the database, and the specified password. + + - name: Create a database + webfaction_db: + name: "{{webfaction_user}}_db1" + password: mytestsql + type: mysql + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" +''' + +import socket +import xmlrpclib +from ansible.module_utils.basic import * + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + state = dict(required=False, default='present'), + # You can specify an IP address or hostname. + type = dict(required=True, default=None), + password = dict(required=False, default=None), + login_name = dict(required=True), + login_password = dict(required=True), + ), + supports_check_mode=True + ) + db_name = module.params['name'] + db_state = module.params['state'] + db_type = module.params['type'] + db_passwd = module.params['password'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + db_list = webfaction.list_dbs(session_id) + db_map = dict([(i['name'], i) for i in db_list]) + existing_db = db_map.get(db_name) + + result = {} + + # Here's where the real stuff happens + + if db_state == 'present': + + # Does an app with this name already exist? + if existing_db: + # Yes, but of a different type - fail + if existing_db['db_type'] != db_type: + module.fail_json(msg="Database already exists but is a different type. Please fix by hand.") + + # If it exists with the right type, we don't change anything. + module.exit_json( + changed = False, + ) + + + if not module.check_mode: + # If this isn't a dry run, create the app + # print positional_args + result.update( + webfaction.create_db( + session_id, db_name, db_type, db_passwd + ) + ) + + elif db_state == 'absent': + + # If the app's already not there, nothing changed. + if not existing_db: + module.exit_json( + changed = False, + ) + + if not module.check_mode: + # If this isn't a dry run, delete the app + result.update( + webfaction.delete_db(session_id, db_name, db_type) + ) + + else: + module.fail_json(msg="Unknown state specified: {}".format(db_state)) + + module.exit_json( + changed = True, + result = result + ) + +# The conventional ending +main() + diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py new file mode 100644 index 00000000000..2f3c8542754 --- /dev/null +++ b/cloud/webfaction/webfaction_domain.py @@ -0,0 +1,134 @@ +#! 
/usr/bin/python +# Create Webfaction domains and subdomains using Ansible and the Webfaction API +# +# Quentin Stafford-Fraser 2015 + +DOCUMENTATION = ''' +--- +module: webfaction_domain +short_description: Add or remove domains and subdomains on Webfaction +description: + - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser +version_added: 1.99 +notes: + - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted. + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. + +options: + + name: + description: + - The name of the domain + required: true + default: null + + state: + description: + - Whether the domain should exist + required: false + choices: ['present', 'absent'] + default: "present" + + subdomains: + description: + - Any subdomains to create. + required: false + default: null + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +import socket +import xmlrpclib +from ansible.module_utils.basic import * + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + state = dict(required=False, default='present'), + subdomains = dict(required=False, default=[]), + login_name = dict(required=True), + login_password = dict(required=True), + ), + supports_check_mode=True + ) + domain_name = module.params['name'] + domain_state = module.params['state'] + domain_subdomains = module.params['subdomains'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + domain_list = webfaction.list_domains(session_id) + domain_map = dict([(i['domain'], i) for i in domain_list]) + existing_domain = domain_map.get(domain_name) + + result = {} + + # Here's where the real stuff happens + + if domain_state == 'present': + + # Does an app with this name already exist? + if existing_domain: + + if set(existing_domain['subdomains']) >= set(domain_subdomains): + # If it exists with the right subdomains, we don't change anything. + module.exit_json( + changed = False, + ) + + positional_args = [session_id, domain_name] + domain_subdomains + + if not module.check_mode: + # If this isn't a dry run, create the app + # print positional_args + result.update( + webfaction.create_domain( + *positional_args + ) + ) + + elif domain_state == 'absent': + + # If the app's already not there, nothing changed. 
+ if not existing_domain: + module.exit_json( + changed = False, + ) + + positional_args = [session_id, domain_name] + domain_subdomains + + if not module.check_mode: + # If this isn't a dry run, delete the app + result.update( + webfaction.delete_domain(*positional_args) + ) + + else: + module.fail_json(msg="Unknown state specified: {}".format(domain_state)) + + module.exit_json( + changed = True, + result = result + ) + +# The conventional ending +main() + diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py new file mode 100644 index 00000000000..3ac848d6a94 --- /dev/null +++ b/cloud/webfaction/webfaction_mailbox.py @@ -0,0 +1,112 @@ +#! /usr/bin/python +# Create webfaction mailbox using Ansible and the Webfaction API +# +# Quentin Stafford-Fraser and Andy Baker 2015 + +DOCUMENTATION = ''' +--- +module: webfaction_mailbox +short_description: Add or remove mailboxes on Webfaction +description: + - Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser +version_added: 1.99 +notes: + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. +options: + + mailbox_name: + description: + - The name of the mailbox + required: true + default: null + + mailbox_password: + description: + - The password for the mailbox + required: true + default: null + + state: + description: + - Whether the mailbox should exist + required: false + choices: ['present', 'absent'] + default: "present" + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +import socket +import xmlrpclib +from ansible.module_utils.basic import * + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec=dict( + mailbox_name=dict(required=True, default=None), + mailbox_password=dict(required=True), + state=dict(required=False, default='present'), + login_name=dict(required=True), + login_password=dict(required=True), + ), + supports_check_mode=True + ) + + mailbox_name = module.params['mailbox_name'] + site_state = module.params['state'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + mailbox_list = webfaction.list_mailboxes(session_id) + existing_mailbox = mailbox_name in mailbox_list + + result = {} + + # Here's where the real stuff happens + + if site_state == 'present': + + # Does a mailbox with this name already exist? + if existing_mailbox: + module.exit_json(changed=False,) + + positional_args = [session_id, mailbox_name] + + if not module.check_mode: + # If this isn't a dry run, create the mailbox + result.update(webfaction.create_mailbox(*positional_args)) + + elif site_state == 'absent': + + # If the mailbox is already not there, nothing changed. 
+ if not existing_mailbox: + module.exit_json(changed=False) + + if not module.check_mode: + # If this isn't a dry run, delete the mailbox + result.update(webfaction.delete_mailbox(session_id, mailbox_name)) + + else: + module.fail_json(msg="Unknown state specified: {}".format(site_state)) + + module.exit_json(changed=True, result=result) + +# The conventional ending +main() + diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py new file mode 100644 index 00000000000..5db89355966 --- /dev/null +++ b/cloud/webfaction/webfaction_site.py @@ -0,0 +1,189 @@ +#! /usr/bin/python +# Create Webfaction website using Ansible and the Webfaction API +# +# Quentin Stafford-Fraser 2015 + +DOCUMENTATION = ''' +--- +module: webfaction_site +short_description: Add or remove a website on a Webfaction host +description: + - Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser +version_added: 1.99 +notes: + - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name. + - If a site of the same name exists in the account but on a different host, the operation will exit. + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. + +options: + + name: + description: + - The name of the website + required: true + default: null + + state: + description: + - Whether the website should exist + required: false + choices: ['present', 'absent'] + default: "present" + + host: + description: + - The webfaction host on which the site should be created. + required: true + + https: + description: + - Whether or not to use HTTPS + required: false + choices: BOOLEANS + default: 'false' + + site_apps: + description: + - A mapping of URLs to apps + required: false + + subdomains: + description: + - A list of subdomains associated with this site. + required: false + default: null + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +EXAMPLES = ''' + - name: create website + webfaction_site: + name: testsite1 + state: present + host: myhost.webfaction.com + subdomains: + - 'testsite1.my_domain.org' + site_apps: + - ['testapp1', '/'] + https: no + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" +''' + +import socket +import xmlrpclib +from ansible.module_utils.basic import * + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + state = dict(required=False, default='present'), + # You can specify an IP address or hostname. 
+ host = dict(required=True, default=None), + https = dict(required=False, choices=BOOLEANS, default='false'), + subdomains = dict(required=False, default=[]), + site_apps = dict(required=False, default=[]), + login_name = dict(required=True), + login_password = dict(required=True), + ), + supports_check_mode=True + ) + site_name = module.params['name'] + site_state = module.params['state'] + site_host = module.params['host'] + site_ip = socket.gethostbyname(site_host) + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + site_list = webfaction.list_websites(session_id) + site_map = dict([(i['name'], i) for i in site_list]) + existing_site = site_map.get(site_name) + + result = {} + + # Here's where the real stuff happens + + if site_state == 'present': + + # Does a site with this name already exist? + if existing_site: + + # If yes, but it's on a different IP address, then fail. + # If we wanted to allow relocation, we could add a 'relocate=true' option + # which would get the existing IP address, delete the site there, and create it + # at the new address. A bit dangerous, perhaps, so for now we'll require manual + # deletion if it's on another host. + + if existing_site['ip'] != site_ip: + module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.") + + # If it's on this host and the key parameters are the same, nothing needs to be done. + + if (existing_site['https'] == module.boolean(module.params['https'])) and \ + (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \ + (dict(existing_site['website_apps']) == dict(module.params['site_apps'])): + module.exit_json( + changed = False + ) + + positional_args = [ + session_id, site_name, site_ip, + module.boolean(module.params['https']), + module.params['subdomains'], + ] + for a in module.params['site_apps']: + positional_args.append( (a[0], a[1]) ) + + if not module.check_mode: + # If this isn't a dry run, create or modify the site + result.update( + webfaction.create_website( + *positional_args + ) if not existing_site else webfaction.update_website ( + *positional_args + ) + ) + + elif site_state == 'absent': + + # If the site's already not there, nothing changed. + if not existing_site: + module.exit_json( + changed = False, + ) + + if not module.check_mode: + # If this isn't a dry run, delete the site + result.update( + webfaction.delete_website(session_id, site_name, site_ip) + ) + + else: + module.fail_json(msg="Unknown state specified: {}".format(site_state)) + + module.exit_json( + changed = True, + result = result + ) + +# The conventional ending +main() + From d1d65fe544c6a26263778643da07d3fd77bb482e Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Sun, 3 May 2015 23:48:51 +0100 Subject: [PATCH 168/720] Tidying of webfaction modules --- cloud/webfaction/webfaction_app.py | 12 +++++------- cloud/webfaction/webfaction_db.py | 10 ++++------ cloud/webfaction/webfaction_domain.py | 8 +++----- cloud/webfaction/webfaction_mailbox.py | 9 ++++----- cloud/webfaction/webfaction_site.py | 14 +++++++------- 5 files changed, 23 insertions(+), 30 deletions(-) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index b1ddcd5a9c0..08a0205eb87 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -13,7 +13,7 @@ short_description: Add or remove applications on a Webfaction host description: - Add or remove applications on a Webfaction host. 
Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 1.99 +version_added: 2.0 notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API `_ for more info. @@ -23,7 +23,6 @@ options: description: - The name of the application required: true - default: null state: description: @@ -65,7 +64,6 @@ options: ''' import xmlrpclib -from ansible.module_utils.basic import * webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') @@ -73,12 +71,12 @@ def main(): module = AnsibleModule( argument_spec = dict( - name = dict(required=True, default=None), + name = dict(required=True), state = dict(required=False, default='present'), type = dict(required=True), - autostart = dict(required=False, choices=BOOLEANS, default='false'), + autostart = dict(required=False, choices=BOOLEANS, default=False), extra_info = dict(required=False, default=""), - port_open = dict(required=False, default="false"), + port_open = dict(required=False, choices=BOOLEANS, default=False), login_name = dict(required=True), login_password = dict(required=True), ), @@ -148,6 +146,6 @@ def main(): result = result ) -# The conventional ending +from ansible.module_utils.basic import * main() diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index 7205a084ef2..479540abc5c 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -10,7 +10,7 @@ short_description: Add or remove a database on Webfaction description: - Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 1.99 +version_added: 2.0 notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API `_ for more info. @@ -20,7 +20,6 @@ options: description: - The name of the database required: true - default: null state: description: @@ -61,7 +60,6 @@ EXAMPLES = ''' import socket import xmlrpclib -from ansible.module_utils.basic import * webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') @@ -69,10 +67,10 @@ def main(): module = AnsibleModule( argument_spec = dict( - name = dict(required=True, default=None), + name = dict(required=True), state = dict(required=False, default='present'), # You can specify an IP address or hostname. 
- type = dict(required=True, default=None), + type = dict(required=True), password = dict(required=False, default=None), login_name = dict(required=True), login_password = dict(required=True), @@ -142,6 +140,6 @@ def main(): result = result ) -# The conventional ending +from ansible.module_utils.basic import * main() diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index 2f3c8542754..a9e2b7dd9bb 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -10,7 +10,7 @@ short_description: Add or remove domains and subdomains on Webfaction description: - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 1.99 +version_added: 2.0 notes: - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted. - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." @@ -22,7 +22,6 @@ options: description: - The name of the domain required: true - default: null state: description: @@ -50,7 +49,6 @@ options: import socket import xmlrpclib -from ansible.module_utils.basic import * webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') @@ -58,7 +56,7 @@ def main(): module = AnsibleModule( argument_spec = dict( - name = dict(required=True, default=None), + name = dict(required=True), state = dict(required=False, default='present'), subdomains = dict(required=False, default=[]), login_name = dict(required=True), @@ -129,6 +127,6 @@ def main(): result = result ) -# The conventional ending +from ansible.module_utils.basic import * main() diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index 3ac848d6a94..1ba571a1dd1 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -10,7 +10,7 @@ short_description: Add or remove mailboxes on Webfaction description: - Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 1.99 +version_added: 2.0 notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API `_ for more info. 
@@ -20,7 +20,6 @@ options: description: - The name of the mailbox required: true - default: null mailbox_password: description: @@ -48,7 +47,6 @@ options: import socket import xmlrpclib -from ansible.module_utils.basic import * webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') @@ -56,7 +54,7 @@ def main(): module = AnsibleModule( argument_spec=dict( - mailbox_name=dict(required=True, default=None), + mailbox_name=dict(required=True), mailbox_password=dict(required=True), state=dict(required=False, default='present'), login_name=dict(required=True), @@ -107,6 +105,7 @@ def main(): module.exit_json(changed=True, result=result) -# The conventional ending + +from ansible.module_utils.basic import * main() diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py index 5db89355966..575e6eec996 100644 --- a/cloud/webfaction/webfaction_site.py +++ b/cloud/webfaction/webfaction_site.py @@ -10,7 +10,7 @@ short_description: Add or remove a website on a Webfaction host description: - Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 1.99 +version_added: 2.0 notes: - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name. - If a site of the same name exists in the account but on a different host, the operation will exit. @@ -23,7 +23,6 @@ options: description: - The name of the website required: true - default: null state: description: @@ -83,7 +82,6 @@ EXAMPLES = ''' import socket import xmlrpclib -from ansible.module_utils.basic import * webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') @@ -91,11 +89,11 @@ def main(): module = AnsibleModule( argument_spec = dict( - name = dict(required=True, default=None), + name = dict(required=True), state = dict(required=False, default='present'), # You can specify an IP address or hostname. - host = dict(required=True, default=None), - https = dict(required=False, choices=BOOLEANS, default='false'), + host = dict(required=True), + https = dict(required=False, choices=BOOLEANS, default=False), subdomains = dict(required=False, default=[]), site_apps = dict(required=False, default=[]), login_name = dict(required=True), @@ -184,6 +182,8 @@ def main(): result = result ) -# The conventional ending + + +from ansible.module_utils.basic import * main() From 7c675705f32ae8fcd26942bfc7e9b2c26b63dba5 Mon Sep 17 00:00:00 2001 From: Sterling Windmill Date: Mon, 4 May 2015 15:57:40 -0400 Subject: [PATCH 169/720] Allow for specifying name instead of host as per the documentation at http://docs.ansible.com/known_hosts_module.html --- system/known_hosts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/known_hosts.py b/system/known_hosts.py index 30ea7755553..86876cd4931 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -82,7 +82,7 @@ def enforce_state(module, params): Add or remove key. 
""" - host = params["host"] + host = params["name"] key = params.get("key",None) port = params.get("port",None) #expand the path parameter; otherwise module.add_path_info From 28b0f3ce132dd78e0407d5f95838d97fd69824b6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 4 May 2015 13:24:21 -0700 Subject: [PATCH 170/720] Fix documentation formatting --- cloud/cloudstack/cs_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 62856c6d177..9b14f1a9834 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -142,7 +142,7 @@ options: tags: description: - List of tags. Tags are a list of dictionaries having keys C(key) and C(value). - - If you want to delete all tags, set a empty list e.g. C(tags: []). + - "If you want to delete all tags, set a empty list e.g. C(tags: [])." required: false default: null poll_async: From 2007345c4b1b061f904155ac02b49a8efa4c3dca Mon Sep 17 00:00:00 2001 From: Rick Mendes Date: Mon, 4 May 2015 14:38:43 -0700 Subject: [PATCH 171/720] ec2_win_password module for windows ec2 instances --- cloud/amazon/ec2_win_password.py | 96 ++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 cloud/amazon/ec2_win_password.py diff --git a/cloud/amazon/ec2_win_password.py b/cloud/amazon/ec2_win_password.py new file mode 100644 index 00000000000..ee795d5b62a --- /dev/null +++ b/cloud/amazon/ec2_win_password.py @@ -0,0 +1,96 @@ +#!/usr/bin/python + +DOCUMENTATION = ''' +--- +module: ec2_win_password +short_description: gets the default administrator password for ec2 windows instances +description: + - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto. +version_added: "2.0" +author: Rick Mendes +options: + instance_id: + description: + - The instance id to get the password data from. + required: true + default: null + aliases: [] + key_file: + description: + - path to the file containing the key pair used on the instance + required: true + default: null + aliases: [] + region: + description: + - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. 
+ required: false + default: null + aliases: [ 'aws_region', 'ec2_region' ] + +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Example of getting a password +tasks: +- name: get the Administrator password + ec2_win_password: + profile: my-boto-profile + instance_id: i-XXXXXX + region: us-east-1 + key_file: "~/aws-creds/my_test_key.pem" +''' + +import sys +from base64 import b64decode +from os.path import expanduser +from Crypto.Cipher import PKCS1_v1_5 +from Crypto.PublicKey import RSA + +try: + import boto.ec2 + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + instance_id = dict(required=True), + key_file = dict(required=True), + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='Boto required for this module.') + + instance_id = module.params.get('instance_id') + key_file = expanduser(module.params.get('key_file')) + + ec2 = ec2_connect(module) + + data = ec2.get_password_data(instance_id) + decoded = b64decode(data) + + f = open(key_file, 'r') + key = RSA.importKey(f.read()) + cipher = PKCS1_v1_5.new(key) + sentinel = 'password decryption failed!!!' + + try: + decrypted = cipher.decrypt(decoded, sentinel) + except ValueError as e: + decrypted = None + + if decrypted == None: + module.exit_json(win_password='', changed=False) + else: + module.exit_json(win_password=decrypted, changed=True) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() From 1e744a885a6c0f890165846397d8eecbfb175cd2 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 5 May 2015 15:32:06 +0200 Subject: [PATCH 172/720] cloudstack: doc fixes --- cloud/cloudstack/cs_affinitygroup.py | 3 ++- cloud/cloudstack/cs_firewall.py | 3 ++- cloud/cloudstack/cs_iso.py | 3 ++- cloud/cloudstack/cs_securitygroup.py | 4 +++- cloud/cloudstack/cs_securitygroup_rule.py | 3 ++- cloud/cloudstack/cs_sshkeypair.py | 4 ++-- cloud/cloudstack/cs_vmsnapshot.py | 3 ++- 7 files changed, 15 insertions(+), 8 deletions(-) diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 59c21ee46f6..593f0840bae 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -22,7 +22,8 @@ DOCUMENTATION = ''' --- module: cs_affinitygroup short_description: Manages affinity groups on Apache CloudStack based clouds. -description: Create and remove affinity groups. +description: + - Create and remove affinity groups. version_added: '2.0' author: René Moser options: diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 9049f40f7c4..91e9e0405c6 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -21,7 +21,8 @@ DOCUMENTATION = ''' module: cs_firewall short_description: Manages firewall rules on Apache CloudStack based clouds. -description: Creates and removes firewall rules. +description: + - Creates and removes firewall rules. version_added: '2.0' author: René Moser options: diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 42f00fb1f00..b416fbb3356 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -22,7 +22,8 @@ DOCUMENTATION = ''' --- module: cs_iso short_description: Manages ISOs images on Apache CloudStack based clouds. -description: Register and remove ISO images. +description: + - Register and remove ISO images. 
version_added: '2.0' author: René Moser options: diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 4e2856d5a90..8b8659cdc94 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -19,9 +19,11 @@ # along with Ansible. If not, see . DOCUMENTATION = ''' +--- module: cs_securitygroup short_description: Manages security groups on Apache CloudStack based clouds. -description: Create and remove security groups. +description: + - Create and remove security groups. version_added: '2.0' author: René Moser options: diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 709a9b562b3..80d271d90c0 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -22,7 +22,8 @@ DOCUMENTATION = ''' --- module: cs_securitygroup_rule short_description: Manages security group rules on Apache CloudStack based clouds. -description: Add and remove security group rules. +description: + - Add and remove security group rules. version_added: '2.0' author: René Moser options: diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 9cc514c05ea..34ace0aa1f2 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -23,8 +23,8 @@ DOCUMENTATION = ''' module: cs_sshkeypair short_description: Manages SSH keys on Apache CloudStack based clouds. description: - - If no key was found and no public key was provided and a new SSH - private/public key pair will be created and the private key will be returned. + - If no key was found and no public key was provided and a new SSH + private/public key pair will be created and the private key will be returned. version_added: '2.0' author: René Moser options: diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index 89c0ec081d6..bb27b2de978 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -22,7 +22,8 @@ DOCUMENTATION = ''' --- module: cs_vmsnapshot short_description: Manages VM snapshots on Apache CloudStack based clouds. -description: Create, remove and revert VM from snapshots. +description: + - Create, remove and revert VM from snapshots. version_added: '2.0' author: René Moser options: From dfa9037091cdd5414f653b6045ad900af3006f6f Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 5 May 2015 15:53:55 +0200 Subject: [PATCH 173/720] cloudstack: fix missing doc fragments --- cloud/cloudstack/cs_affinitygroup.py | 1 + cloud/cloudstack/cs_firewall.py | 1 + cloud/cloudstack/cs_instance.py | 1 + cloud/cloudstack/cs_iso.py | 1 + cloud/cloudstack/cs_securitygroup.py | 1 + cloud/cloudstack/cs_sshkeypair.py | 1 + cloud/cloudstack/cs_vmsnapshot.py | 1 + 7 files changed, 7 insertions(+) diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 593f0840bae..07b9cf42d6a 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -52,6 +52,7 @@ options: - Poll async jobs until job has finished. required: false default: true +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 91e9e0405c6..13f114c1b35 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -72,6 +72,7 @@ options: - Name of the project. 
required: false default: null +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 9b14f1a9834..8680f20ada5 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -150,6 +150,7 @@ options: - Poll async jobs until job has finished. required: false default: true +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index b416fbb3356..83af1e1783e 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -95,6 +95,7 @@ options: required: false default: 'present' choices: [ 'present', 'absent' ] +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 8b8659cdc94..50556da5bb3 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -47,6 +47,7 @@ options: - Name of the project the security group to be created in. required: false default: null +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 34ace0aa1f2..8dd02dcd1f1 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -48,6 +48,7 @@ options: - String of the public key. required: false default: null +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index bb27b2de978..dad660cd77c 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -67,6 +67,7 @@ options: - Poll async jobs until job has finished. required: false default: true +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' From 8438ef995e206c230be3a06e4c016a0c1142c151 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 5 May 2015 16:17:05 +0200 Subject: [PATCH 174/720] cloudstack: fix missing doc fragment in cs_securitygroup_rule --- cloud/cloudstack/cs_securitygroup_rule.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 80d271d90c0..1f2dac6f267 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -90,6 +90,7 @@ options: - Poll async jobs until job has finished. required: false default: true +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' From 4e21eb09db44406a065dbfabc83fa94869acf849 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Fievet?= <_@sebastien-fievet.fr> Date: Tue, 5 May 2015 16:58:25 +0200 Subject: [PATCH 175/720] Typo --- monitoring/pingdom.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/pingdom.py b/monitoring/pingdom.py index 6f658cd9505..0ae1af357e0 100644 --- a/monitoring/pingdom.py +++ b/monitoring/pingdom.py @@ -111,7 +111,7 @@ def main(): ) if not HAS_PINGDOM: - module.fail_json(msg="Missing requried pingdom module (check docs)") + module.fail_json(msg="Missing required pingdom module (check docs)") checkid = module.params['checkid'] state = module.params['state'] From 34b81a875691bb111aa5062bcadc3802ec4d4813 Mon Sep 17 00:00:00 2001 From: Julien Vey Date: Wed, 6 May 2015 10:48:28 +0200 Subject: [PATCH 176/720] [homebew_cask] Be consistent in the documentation The documentation for the `state` field is not very clear. 
It says possible values are "installed, uninstalled" and default value is "present" The examples below alow uses `present` and `absent`. This patch uses "absent" and "present" instead of "installed" and "uninstalled" Moreover, this is consistent with other packaging modules, like homebrew itself --- packaging/os/homebrew_cask.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/homebrew_cask.py b/packaging/os/homebrew_cask.py index dede8d4bb36..75acead517b 100644 --- a/packaging/os/homebrew_cask.py +++ b/packaging/os/homebrew_cask.py @@ -32,7 +32,7 @@ options: state: description: - state of the cask - choices: [ 'installed', 'uninstalled' ] + choices: [ 'present', 'absent' ] required: false default: present ''' From a52a4b33e7bade35a723e9f1cfcb388acf4cec94 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 6 May 2015 14:53:46 +0200 Subject: [PATCH 177/720] cloudstack: new module cs_instancegroup --- cloud/cloudstack/cs_instancegroup.py | 228 +++++++++++++++++++++++++++ 1 file changed, 228 insertions(+) create mode 100644 cloud/cloudstack/cs_instancegroup.py diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py new file mode 100644 index 00000000000..2c47a9f6f25 --- /dev/null +++ b/cloud/cloudstack/cs_instancegroup.py @@ -0,0 +1,228 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_instancegroup +short_description: Manages instance groups on Apache CloudStack based clouds. +description: + - Create and remove instance groups. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of the instance group. + required: true + domain: + description: + - Domain the instance group is related to. + required: false + default: null + account: + description: + - Account the instance group is related to. + required: false + default: null + project: + description: + - Project the instance group is related to. + required: false + default: null + state: + description: + - State of the instance group. + required: false + default: 'present' + choices: [ 'present', 'absent' ] +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +--- +# Create an instance group +- local_action: + module: cs_instancegroup + name: loadbalancers + + +# Remove an instance group +- local_action: + module: cs_instancegroup + name: loadbalancers + state: absent +''' + +RETURN = ''' +--- +id: + description: ID of the instance group. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the instance group. + returned: success + type: string + sample: webservers +created: + description: Date when the instance group was created. + returned: success + type: string + sample: 2015-05-03T15:05:51+0200 +domain: + description: Domain the instance group is related to. 
+ returned: success + type: string + sample: example domain +account: + description: Account the instance group is related to. + returned: success + type: string + sample: example account +project: + description: Project the instance group is related to. + returned: success + type: string + sample: example project +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackInstanceGroup(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.instance_group = None + + + def get_instance_group(self): + if self.instance_group: + return self.instance_group + + name = self.module.params.get('name') + + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + + instance_groups = self.cs.listInstanceGroups(**args) + if instance_groups: + for g in instance_groups['instancegroup']: + if name in [ g['name'], g['id'] ]: + self.instance_group = g + break + return self.instance_group + + + def present_instance_group(self): + instance_group = self.get_instance_group() + if not instance_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + + if not self.module.check_mode: + res = self.cs.createInstanceGroup(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + instance_group = res['instancegroup'] + return instance_group + + + def absent_instance_group(self): + instance_group = self.get_instance_group() + if instance_group: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.deleteInstanceGroup(id=instance_group['id']) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + return instance_group + + + def get_result(self, instance_group): + if instance_group: + if 'id' in instance_group: + self.result['id'] = instance_group['id'] + if 'created' in instance_group: + self.result['created'] = instance_group['created'] + if 'name' in instance_group: + self.result['name'] = instance_group['name'] + if 'project' in instance_group: + self.result['project'] = instance_group['project'] + if 'domain' in instance_group: + self.result['domain'] = instance_group['domain'] + if 'account' in instance_group: + self.result['account'] = instance_group['account'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(default='present', choices=['present', 'absent']), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_ig = AnsibleCloudStackInstanceGroup(module) + + state = module.params.get('state') + if state in ['absent']: + instance_group = acs_ig.absent_instance_group() + else: + instance_group = acs_ig.present_instance_group() + + result = acs_ig.get_result(instance_group) + + except CloudStackException, e: + 
module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From ce1b3d257c87efea16af6f83c3a8eb828e695c70 Mon Sep 17 00:00:00 2001 From: Rick Mendes Date: Wed, 6 May 2015 08:50:14 -0700 Subject: [PATCH 178/720] house cleaning based on code review --- cloud/amazon/ec2_win_password.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cloud/amazon/ec2_win_password.py b/cloud/amazon/ec2_win_password.py index ee795d5b62a..33a6ae7f947 100644 --- a/cloud/amazon/ec2_win_password.py +++ b/cloud/amazon/ec2_win_password.py @@ -13,14 +13,10 @@ options: description: - The instance id to get the password data from. required: true - default: null - aliases: [] key_file: description: - path to the file containing the key pair used on the instance required: true - default: null - aliases: [] region: description: - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. @@ -42,7 +38,6 @@ tasks: key_file: "~/aws-creds/my_test_key.pem" ''' -import sys from base64 import b64decode from os.path import expanduser from Crypto.Cipher import PKCS1_v1_5 From c0ca227717f8ad85bb954da2873c41e7409dec8c Mon Sep 17 00:00:00 2001 From: Nick Harring Date: Wed, 6 May 2015 09:43:38 -0700 Subject: [PATCH 179/720] PEP-8 style fixes --- monitoring/circonus_annotation.py | 32 +++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/monitoring/circonus_annotation.py b/monitoring/circonus_annotation.py index dd95402a7c1..75ca6540cbe 100644 --- a/monitoring/circonus_annotation.py +++ b/monitoring/circonus_annotation.py @@ -71,13 +71,16 @@ EXAMPLES = ''' end_time: 1395954407 ''' def post_annotation(annotation, api_key): + ''' Takes annotation dict and api_key string''' base_url = 'https://api.circonus.com/v2' anootate_post_endpoint = '/annotation' - resp = requests.post(base_url + anootate_post_endpoint, headers=build_headers(api_key), data=json.dumps(annotation)) + resp = requests.post(base_url + anootate_post_endpoint, + headers=build_headers(api_key), data=json.dumps(annotation)) resp.raise_for_status() return resp def create_annotation(module): + ''' Takes ansible module object ''' annotation = {} if module.params['duration'] != None: duration = module.params['duration'] @@ -97,23 +100,24 @@ def create_annotation(module): annotation['description'] = module.params['description'] annotation['title'] = module.params['title'] return annotation - def build_headers(api_token): - headers = {'X-Circonus-App-Name': 'ansible', - 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token, - 'Accept': 'application/json'} + '''Takes api token, returns headers with it included.''' + headers = {'X-Circonus-App-Name': 'ansible', + 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token, + 'Accept': 'application/json'} return headers def main(): + '''Main function, dispatches logic''' module = AnsibleModule( - argument_spec = dict( - start = dict(required=False, type='int'), - stop = dict(required=False, type='int'), - category = dict(required=True), - title = dict(required=True), - description = dict(required=True), - duration = dict(required=False, type='int'), - api_key = dict(required=True) + argument_spec=dict( + start=dict(required=False, type='int'), + stop=dict(required=False, type='int'), + category=dict(required=True), + title=dict(required=True), + description=dict(required=True), + 
duration=dict(required=False, type='int'), + api_key=dict(required=True) ) ) annotation = create_annotation(module) @@ -123,5 +127,5 @@ def main(): module.fail_json(msg='Request Failed', reason=e) module.exit_json(changed=True, annotation=resp.json()) -from ansible.module_utils.basic import * +from ansible.module_utils.basic import AnsibleModule main() From d8d1ca85dfef130d641727a2794a265a3521eff1 Mon Sep 17 00:00:00 2001 From: Jeff Rizzo Date: Wed, 6 May 2015 11:41:12 -0700 Subject: [PATCH 180/720] Support both new and old pkgin versions by testing if '-p' flag exists. --- packaging/os/pkgin.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/packaging/os/pkgin.py b/packaging/os/pkgin.py index f4c203e56e0..9f25094210c 100644 --- a/packaging/os/pkgin.py +++ b/packaging/os/pkgin.py @@ -76,9 +76,19 @@ def query_package(module, pkgin_path, name): * False - not installed or not found """ + # test whether '-p' (parsable) flag is supported. + rc, out, err = module.run_command("%s -p -v" % pkgin_path) + + if rc == 0: + pflag = '-p' + splitchar = ';' + else: + pflag = '' + splitchar = ' ' + # Use "pkgin search" to find the package. The regular expression will # only match on the complete name. - rc, out, err = module.run_command("%s search \"^%s$\"" % (pkgin_path, name)) + rc, out, err = module.run_command("%s %s search \"^%s$\"" % (pkgin_path, pflag, name)) # rc will not be 0 unless the search was a success if rc == 0: @@ -93,7 +103,7 @@ def query_package(module, pkgin_path, name): # '<' - installed but out of date # '=' - installed and up to date # '>' - installed but newer than the repository version - pkgname_with_version, raw_state = out.split(' ')[0:2] + pkgname_with_version, raw_state = out.split(splitchar)[0:2] # Strip version # (results in sth like 'gcc47-libs') From fa97d665b99f6e56a8ef6342fdda6a42e24b35f8 Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Fri, 8 May 2015 17:14:22 +0100 Subject: [PATCH 181/720] add module rabbitmq_binding --- messaging/rabbitmq_binding.py | 188 ++++++++++++++++++++++++++++++++++ 1 file changed, 188 insertions(+) create mode 100644 messaging/rabbitmq_binding.py diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py new file mode 100644 index 00000000000..17433ecdd22 --- /dev/null +++ b/messaging/rabbitmq_binding.py @@ -0,0 +1,188 @@ +#!/usr/bin/python + +DOCUMENTATION = ''' +module: rabbitmq_binding +author: Manuel Sousa +version_added: 1.5.4 + +short_description: This module manages rabbitMQ bindings +description: + - This module uses rabbitMQ Rest API to create/delete bindings +requirements: [ python requests ] +options: + state: + description: + - Whether the exchange should be present or absent + - Only present implemented atm + choices: [ "present", "absent" ] + required: false + default: present + name: + description: + - source exchange to create binding on + required: true + aliases: [ "src", "source" ] + login_user: + description: + - rabbitMQ user for connection + required: false + default: guest + login_password: + description: + - rabbitMQ password for connection + required: false + default: false + login_host: + description: + - rabbitMQ host for connection + required: false + default: localhost + login_port: + description: + - rabbitMQ management api port + required: false + default: 15672 + vhost: + description: + - rabbitMQ virtual host + - default vhost is / + required: false + default: "/" + destination: + description: + - destination exchange or queue for the binding + required: true + 
aliases: [ "dst", "dest" ] + destinationType: + description: + - Either queue or exchange + required: true + choices: [ "queue", "exchange" ] + aliases: [ "type", "destType" ] + routingKey: + description: + - routing key for the binding + - default is # + required: false + default: "#" + arguments: + description: + - extra arguments for exchange. If defined this argument is a key/value dictionary + required: false +''' + +EXAMPLES = ''' +# Bind myQueue to directExchange with routing key info +- rabbitmq_binding: name=directExchange destination=myQueue type=queue routingKey=info + +# Bind directExchange to topicExchange with routing key *.info +- rabbitmq_binding: name=topicExchange destination=topicExchange type=exchange routingKey="*.info" +''' + +import requests +import urllib +import json + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default='present', choices=['present', 'absent'], type='str'), + name = dict(required=True, aliases=[ "src", "source" ], type='str'), + login_user = dict(default='guest', type='str'), + login_password = dict(default='guest', type='str'), + login_host = dict(default='localhost', type='str'), + login_port = dict(default='15672', type='str'), + vhost = dict(default='/', type='str'), + destination = dict(required=True, aliases=[ "dst", "dest"], type='str'), + destinationType = dict(required=True, aliases=[ "type", "destType"], choices=[ "queue", "exchange" ],type='str'), + routingKey = dict(default='#', type='str'), + arguments = dict(default=dict(), type='dict') + ), + supports_check_mode = True + ) + + url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % ( + module.params['login_host'], + module.params['login_port'], + urllib.quote(module.params['vhost'],''), + module.params['name'], + "q" if module.params['destinationType'] == "queue" else "e", + module.params['destination'], + urllib.quote(module.params['routingKey'],'') + ) + + # Check if exchange already exists + r = requests.get( url, auth=(module.params['login_user'],module.params['login_password'])) + + if r.status_code==200: + bindingExists = True + response = r.json() + elif r.status_code==404: + bindingExists = False + response = r.text + else: + module.fail_json( + msg = "Invalid response from RESTAPI when trying to check if exchange exists", + details = r.text + ) + + changeRequired = not bindingExists if module.params['state']=='present' else bindingExists + + # Exit if check_mode + if module.check_mode: + module.exit_json( + changed= changeRequired, + result = "Success", + name = module.params['name'], + details = response, + arguments = module.params['arguments'] + ) + + # Do changes + if changeRequired: + if module.params['state'] == 'present': + url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s" % ( + module.params['login_host'], + module.params['login_port'], + urllib.quote(module.params['vhost'],''), + module.params['name'], + "q" if module.params['destinationType'] == "queue" else "e", + module.params['destination'] + ) + + r = requests.post( + url, + auth = (module.params['login_user'],module.params['login_password']), + headers = { "content-type": "application/json"}, + data = json.dumps({ + "routing_key": module.params['routingKey'], + "arguments": module.params['arguments'] + }) + ) + elif module.params['state'] == 'absent': + r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password'])) + + if r.status_code == 204 or r.status_code == 201: + module.exit_json( + changed = True, + result = "Success", + name = 
module.params['name'], + destination = module.params['destination'] + ) + else: + module.fail_json( + msg = "Error creating exchange", + status = r.status_code, + details = r.text + ) + + else: + module.exit_json( + changed = False, + result = "Success", + name = module.params['name'] + ) + +# import module snippets +from ansible.module_utils.basic import * +main() From 308305a691c2b90fd354662b5d121db4de65a68d Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Fri, 8 May 2015 17:15:36 +0100 Subject: [PATCH 182/720] add module rabbitmq_exchange --- messaging/rabbitmq_exchange.py | 197 +++++++++++++++++++++++++++++++++ 1 file changed, 197 insertions(+) create mode 100644 messaging/rabbitmq_exchange.py diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py new file mode 100644 index 00000000000..ac2fee61812 --- /dev/null +++ b/messaging/rabbitmq_exchange.py @@ -0,0 +1,197 @@ +#!/usr/bin/python + +DOCUMENTATION = ''' +module: rabbitmq_exchange +author: Manuel Sousa +version_added: 1.5.4 + +short_description: This module manages rabbitMQ exchanges +description: + - This module uses rabbitMQ Rest API to create/delete exchanges +requirements: [ python requests ] +options: + name: + description: + - Name of the exchange to create + required: true + state: + description: + - Whether the exchange should be present or absent + - Only present implemented atm + choices: [ "present", "absent" ] + required: false + default: present + login_user: + description: + - rabbitMQ user for connection + required: false + default: guest + login_password: + description: + - rabbitMQ password for connection + required: false + default: false + login_host: + description: + - rabbitMQ host for connection + required: false + default: localhost + login_port: + description: + - rabbitMQ management api port + required: false + default: 15672 + vhost: + description: + - rabbitMQ virtual host + required: false + default: "/" + durable: + description: + - whether exchange is durable or not + required: false + choices: [ "yes", "no" ] + default: yes + exchangeType: + description: + - type for the exchange + required: false + choices: [ "fanout", "direct", "headers", "topic" ] + aliases: [ "type" ] + default: direct + autoDelete: + description: + - if the exchange should delete itself after all queues/exchanges unbound from it + required: false + choices: [ "yes", "no" ] + default: no + internal: + description: + - exchange is available only for other exchanges + required: false + choices: [ "yes", "no" ] + default: no + arguments: + description: + - extra arguments for exchange. 
If defined this argument is a key/value dictionary + required: false +''' + +EXAMPLES = ''' +# Create direct exchange +- rabbitmq_exchange: name=directExchange + +# Create topic exchange on vhost +- rabbitmq_exchange: name=topicExchange type=topic vhost=myVhost +''' + +import requests +import urllib +import json + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default='present', choices=['present', 'absent'], type='str'), + name = dict(required=True, type='str'), + login_user = dict(default='guest', type='str'), + login_password = dict(default='guest', type='str'), + login_host = dict(default='localhost', type='str'), + login_port = dict(default='15672', type='str'), + vhost = dict(default='/', type='str'), + durable = dict(default=True, choices=BOOLEANS, type='bool'), + autoDelete = dict(default=False, choices=BOOLEANS, type='bool'), + internal = dict(default=False, choices=BOOLEANS, type='bool'), + exchangeType = dict(default='direct', aliases=['type'], type='str'), + arguments = dict(default=dict(), type='dict') + ), + supports_check_mode = True + ) + + url = "http://%s:%s/api/exchanges/%s/%s" % ( + module.params['login_host'], + module.params['login_port'], + urllib.quote(module.params['vhost'],''), + module.params['name'] + ) + + # Check if exchange already exists + r = requests.get( url, auth=(module.params['login_user'],module.params['login_password'])) + + if r.status_code==200: + exchangeExists = True + response = r.json() + elif r.status_code==404: + exchangeExists = False + response = r.text + else: + module.fail_json( + msg = "Invalid response from RESTAPI when trying to check if exchange exists", + details = r.text + ) + + changeRequired = not exchangeExists if module.params['state']=='present' else exchangeExists + + # Check if attributes change on existing exchange + if not changeRequired and r.status_code==200 and module.params['state'] == 'present': + if not ( + response['durable'] == module.params['durable'] and + response['auto_delete'] == module.params['autoDelete'] and + response['internal'] == module.params['internal'] and + response['type'] == module.params['exchangeType'] + ): + module.fail_json( + msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing exchanges" + ) + + # Exit if check_mode + if module.check_mode: + module.exit_json( + changed= changeRequired, + result = "Success", + name = module.params['name'], + details = response, + arguments = module.params['arguments'] + ) + + # Do changes + if changeRequired: + if module.params['state'] == 'present': + r = requests.put( + url, + auth = (module.params['login_user'],module.params['login_password']), + headers = { "content-type": "application/json"}, + data = json.dumps({ + "durable": module.params['durable'], + "auto_delete": module.params['autoDelete'], + "internal": module.params['internal'], + "type": module.params['exchangeType'], + "arguments": module.params['arguments'] + }) + ) + elif module.params['state'] == 'absent': + r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password'])) + + if r.status_code == 204: + module.exit_json( + changed = True, + result = "Success", + name = module.params['name'] + ) + else: + module.fail_json( + msg = "Error creating exchange", + status = r.status_code, + details = r.text + ) + + else: + module.exit_json( + changed = False, + result = "Success", + name = module.params['name'] + ) + +# import module snippets +from ansible.module_utils.basic import * +main() From 
99f4f592fabc8f9f84ccc55971a5fa2b1797a3a0 Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Fri, 8 May 2015 17:16:41 +0100 Subject: [PATCH 183/720] add module rabbitmq_queue --- messaging/rabbitmq_queue.py | 235 ++++++++++++++++++++++++++++++++++++ 1 file changed, 235 insertions(+) create mode 100644 messaging/rabbitmq_queue.py diff --git a/messaging/rabbitmq_queue.py b/messaging/rabbitmq_queue.py new file mode 100644 index 00000000000..8abfb447551 --- /dev/null +++ b/messaging/rabbitmq_queue.py @@ -0,0 +1,235 @@ +#!/usr/bin/python + +DOCUMENTATION = ''' +module: rabbitmq_queue +author: Manuel Sousa +version_added: 1.5.4 + +short_description: This module manages rabbitMQ queues +description: + - This module uses rabbitMQ Rest API to create/delete queues +requirements: [ python requests ] +options: + name: + description: + - Name of the queue to create + required: true + state: + description: + - Whether the queue should be present or absent + - Only present implemented atm + choices: [ "present", "absent" ] + required: false + default: present + login_user: + description: + - rabbitMQ user for connection + required: false + default: guest + login_password: + description: + - rabbitMQ password for connection + required: false + default: false + login_host: + description: + - rabbitMQ host for connection + required: false + default: localhost + login_port: + description: + - rabbitMQ management api port + required: false + default: 15672 + vhost: + description: + - rabbitMQ virtual host + required: false + default: "/" + durable: + description: + - whether queue is durable or not + required: false + choices: [ "yes", "no" ] + default: yes + autoDelete: + description: + - if the queue should delete itself after all queues/queues unbound from it + required: false + choices: [ "yes", "no" ] + default: no + messageTTL: + description: + - How long a message can live in queue before it is discarded (milliseconds) + required: False + autoExpires: + description: + - How long a queue can be unused before it is automatically deleted (milliseconds) + required: false + maxLength: + description: + - How many messages can the queue contain before it starts rejecting + required: false + deadLetterExchange: + description: + - Optional name of an exchange to which messages will be republished if they + - are rejected or expire + required: false + deadLetterRoutingKey: + description: + - Optional replacement routing key to use when a message is dead-lettered. + - Original routing key will be used if unset + required: false + arguments: + description: + - extra arguments for queue. 
If defined this argument is a key/value dictionary + required: false +''' + +EXAMPLES = ''' +# Create a queue +- rabbitmq_queue: name=myQueue + +# Create a queue on remote host +- rabbitmq_queue: name=myRemoteQueue login_user=user login_password=secret login_host=remote.example.org +''' + +import requests +import urllib +import json + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default='present', choices=['present', 'absent'], type='str'), + name = dict(required=True, type='str'), + login_user = dict(default='guest', type='str'), + login_password = dict(default='guest', type='str'), + login_host = dict(default='localhost', type='str'), + login_port = dict(default='15672', type='str'), + vhost = dict(default='/', type='str'), + durable = dict(default=True, choices=BOOLEANS, type='bool'), + autoDelete = dict(default=False, choices=BOOLEANS, type='bool'), + messageTTL = dict(default=None, type='int'), + autoExpire = dict(default=None, type='int'), + maxLength = dict(default=None, type='int'), + deadLetterExchange = dict(default=None, type='str'), + deadLetterRoutingKey = dict(default=None, type='str'), + arguments = dict(default=dict(), type='dict') + ), + supports_check_mode = True + ) + + url = "http://%s:%s/api/queues/%s/%s" % ( + module.params['login_host'], + module.params['login_port'], + urllib.quote(module.params['vhost'],''), + module.params['name'] + ) + + # Check if queue already exists + r = requests.get( url, auth=(module.params['login_user'],module.params['login_password'])) + + if r.status_code==200: + queueExists = True + response = r.json() + elif r.status_code==404: + queueExists = False + response = r.text + else: + module.fail_json( + msg = "Invalid response from RESTAPI when trying to check if queue exists", + details = r.text + ) + + changeRequired = not queueExists if module.params['state']=='present' else queueExists + + # Check if attributes change on existing queue + if not changeRequired and r.status_code==200 and module.params['state'] == 'present': + if not ( + response['durable'] == module.params['durable'] and + response['auto_delete'] == module.params['autoDelete'] and + ( + response['arguments']['x-message-ttl'] == module.params['messageTTL'] if 'x-message-ttl' in response['arguments'] else module.params['messageTTL'] is None + ) and + ( + response['arguments']['x-expires'] == module.params['autoExpire'] if 'x-expires' in response['arguments'] else module.params['autoExpire'] is None + ) and + ( + response['arguments']['x-max-length'] == module.params['maxLength'] if 'x-max-length' in response['arguments'] else module.params['maxLength'] is None + ) and + ( + response['arguments']['x-dead-letter-exchange'] == module.params['deadLetterExchange'] if 'x-dead-letter-exchange' in response['arguments'] else module.params['deadLetterExchange'] is None + ) and + ( + response['arguments']['x-dead-letter-routing-key'] == module.params['deadLetterRoutingKey'] if 'x-dead-letter-routing-key' in response['arguments'] else module.params['deadLetterRoutingKey'] is None + ) + ): + module.fail_json( + msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing queues", + details = "XPTO", + src = json.dumps(response['arguments']), + dest = json.dumps(module.params) + ) + + + # Copy parameters to arguments as used by RabbitMQ + for k,v in { + 'messageTTL': 'x-message-ttl', + 'autoExpire': 'x-expires', + 'maxLength': 'x-max-length', + 'deadLetterExchange': 'x-dead-letter-exchange', + 'deadLetterRoutingKey': 'x-dead-letter-routing-key' 
+ }.items(): + if module.params[k]: + module.params['arguments'][v] = module.params[k] + + # Exit if check_mode + if module.check_mode: + module.exit_json( + changed= changeRequired, + result = "Success", + name = module.params['name'], + details = response, + arguments = module.params['arguments'] + ) + + # Do changes + if changeRequired: + if module.params['state'] == 'present': + r = requests.put( + url, + auth = (module.params['login_user'],module.params['login_password']), + headers = { "content-type": "application/json"}, + data = json.dumps({ + "durable": module.params['durable'], + "auto_delete": module.params['autoDelete'], + "arguments": module.params['arguments'] + }) + ) + elif module.params['state'] == 'absent': + r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password'])) + + if r.status_code == 204: + module.exit_json( + changed = True, + result = "Success", + name = module.params['name'] + ) + else: + module.fail_json( + msg = "Error creating queue", + status = r.status_code, + details = r.text + ) + + else: + module.exit_json( + changed = False, + result = "Success", + name = module.params['name'] + ) + +# import module snippets +from ansible.module_utils.basic import * +main() From 4ea269f0babca77c119e6781b36d880849af3429 Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Fri, 8 May 2015 17:28:53 +0100 Subject: [PATCH 184/720] Change version and remove result="success" --- messaging/rabbitmq_queue.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/messaging/rabbitmq_queue.py b/messaging/rabbitmq_queue.py index 8abfb447551..34d107e3856 100644 --- a/messaging/rabbitmq_queue.py +++ b/messaging/rabbitmq_queue.py @@ -3,7 +3,7 @@ DOCUMENTATION = ''' module: rabbitmq_queue author: Manuel Sousa -version_added: 1.5.4 +version_added: 2.0 short_description: This module manages rabbitMQ queues description: @@ -188,7 +188,6 @@ def main(): if module.check_mode: module.exit_json( changed= changeRequired, - result = "Success", name = module.params['name'], details = response, arguments = module.params['arguments'] @@ -213,7 +212,6 @@ def main(): if r.status_code == 204: module.exit_json( changed = True, - result = "Success", name = module.params['name'] ) else: @@ -226,7 +224,6 @@ def main(): else: module.exit_json( changed = False, - result = "Success", name = module.params['name'] ) From 4843e06fcd232203e69f3a8a93a3ec1e94662e4d Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Fri, 8 May 2015 17:30:05 +0100 Subject: [PATCH 185/720] Change version and remove result="success" --- messaging/rabbitmq_exchange.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py index ac2fee61812..b7dbd00be04 100644 --- a/messaging/rabbitmq_exchange.py +++ b/messaging/rabbitmq_exchange.py @@ -3,7 +3,7 @@ DOCUMENTATION = ''' module: rabbitmq_exchange author: Manuel Sousa -version_added: 1.5.4 +version_added: 2.0 short_description: This module manages rabbitMQ exchanges description: @@ -148,7 +148,6 @@ def main(): if module.check_mode: module.exit_json( changed= changeRequired, - result = "Success", name = module.params['name'], details = response, arguments = module.params['arguments'] @@ -175,7 +174,6 @@ def main(): if r.status_code == 204: module.exit_json( changed = True, - result = "Success", name = module.params['name'] ) else: @@ -188,7 +186,6 @@ def main(): else: module.exit_json( changed = False, - result = "Success", name = module.params['name'] ) From 
e87422ee967d99c31be9494c8c7334a0021b7941 Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Fri, 8 May 2015 17:30:41 +0100 Subject: [PATCH 186/720] Change version and remove result="success" --- messaging/rabbitmq_binding.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py index 17433ecdd22..731e52ba969 100644 --- a/messaging/rabbitmq_binding.py +++ b/messaging/rabbitmq_binding.py @@ -3,7 +3,7 @@ DOCUMENTATION = ''' module: rabbitmq_binding author: Manuel Sousa -version_added: 1.5.4 +version_added: 2.0 short_description: This module manages rabbitMQ bindings description: @@ -132,7 +132,6 @@ def main(): if module.check_mode: module.exit_json( changed= changeRequired, - result = "Success", name = module.params['name'], details = response, arguments = module.params['arguments'] @@ -165,7 +164,6 @@ def main(): if r.status_code == 204 or r.status_code == 201: module.exit_json( changed = True, - result = "Success", name = module.params['name'], destination = module.params['destination'] ) @@ -179,7 +177,6 @@ def main(): else: module.exit_json( changed = False, - result = "Success", name = module.params['name'] ) From 2c4ea937712ba8508ea0956cc4a0731733085d82 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 8 May 2015 13:42:20 -0500 Subject: [PATCH 187/720] Add compileall testing via travis to validate modules are python24 compatible --- .travis.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000000..62bbff5cd69 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,10 @@ +sudo: false +language: python +addons: + apt: + sources: + - deadsnakes + packages: + - python2.4 +script: + - python2.4 -m compileall -fq -x 'cloud/' . From deb11be684e5cbd0974401615d9926d13221ff5c Mon Sep 17 00:00:00 2001 From: Ian Cordasco Date: Fri, 8 May 2015 21:18:59 -0500 Subject: [PATCH 188/720] Interpolate container name into error message In the event failed to start, the container name was not being properly interpolated into the error message. --- cloud/lxc/lxc_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 5f0f6bb2ad6..f64161d1807 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -794,7 +794,7 @@ class LxcContainerManagement(object): rc=1, msg='The container [ %s ] failed to start. Check to lxc is' ' available and that the container is in a functional' - ' state.' + ' state.' 
% self.container_name ) def _check_archive(self): From 69c0a6360bfbbf6356f92cdfcdadce2600e80c22 Mon Sep 17 00:00:00 2001 From: fdupoux Date: Sat, 9 May 2015 14:06:58 +0100 Subject: [PATCH 189/720] Suppress prompts from lvcreate using --yes when LVM supports this option --- system/lvol.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index d9be9e7dc70..49bd713e16d 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -83,6 +83,8 @@ import re decimal_point = re.compile(r"(\.|,)") +def mkversion(major, minor, patch): + return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch) def parse_lvs(data): lvs = [] @@ -95,6 +97,17 @@ def parse_lvs(data): return lvs +def get_lvm_version(module): + ver_cmd = module.get_bin_path("lvm", required=True) + rc, out, err = module.run_command("%s version" % (ver_cmd)) + if rc != 0: + return None + m = re.search("LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out) + if not m: + return None + return mkversion(m.group(1), m.group(2), m.group(3)) + + def main(): module = AnsibleModule( argument_spec=dict( @@ -107,6 +120,13 @@ def main(): supports_check_mode=True, ) + # Determine if the "--yes" option should be used + version_found = get_lvm_version(module) + if version_found == None: + module.fail_json(msg="Failed to get LVM version number") + version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option + yesopt = "--yes" if version_found >= version_yesopt else "" + vg = module.params['vg'] lv = module.params['lv'] size = module.params['size'] @@ -187,7 +207,7 @@ def main(): changed = True else: lvcreate_cmd = module.get_bin_path("lvcreate", required=True) - rc, _, err = module.run_command("%s -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) + rc, _, err = module.run_command("%s %s -n %s -%s %s%s %s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, vg)) if rc == 0: changed = True else: From a0ef5e4a5973f850fdcc019896ede93fe8595675 Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Sun, 10 May 2015 20:40:50 +0100 Subject: [PATCH 190/720] Documentation version_added numbers are strings. --- cloud/webfaction/webfaction_app.py | 2 +- cloud/webfaction/webfaction_db.py | 2 +- cloud/webfaction/webfaction_domain.py | 2 +- cloud/webfaction/webfaction_mailbox.py | 2 +- cloud/webfaction/webfaction_site.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index 08a0205eb87..dec5f8e5d5e 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -13,7 +13,7 @@ short_description: Add or remove applications on a Webfaction host description: - Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 2.0 +version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API `_ for more info. 
diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index 479540abc5c..fc522439591 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -10,7 +10,7 @@ short_description: Add or remove a database on Webfaction description: - Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 2.0 +version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API `_ for more info. diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index a9e2b7dd9bb..31339014e6c 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -10,7 +10,7 @@ short_description: Add or remove domains and subdomains on Webfaction description: - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 2.0 +version_added: "2.0" notes: - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted. - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index 1ba571a1dd1..5eb82df3eaa 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -10,7 +10,7 @@ short_description: Add or remove mailboxes on Webfaction description: - Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 2.0 +version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API `_ for more info. diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py index 575e6eec996..c981a21fc2b 100644 --- a/cloud/webfaction/webfaction_site.py +++ b/cloud/webfaction/webfaction_site.py @@ -10,7 +10,7 @@ short_description: Add or remove a website on a Webfaction host description: - Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 2.0 +version_added: "2.0" notes: - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. 
But at least, unlike the API, you don't need to know the IP address - you can use a DNS name. - If a site of the same name exists in the account but on a different host, the operation will exit. From de28b84bf79a3b36e95aa4fabf6b080736bceee7 Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Sun, 10 May 2015 20:47:31 +0100 Subject: [PATCH 191/720] Available choices for 'state' explicitly listed. --- cloud/webfaction/webfaction_app.py | 2 +- cloud/webfaction/webfaction_db.py | 2 +- cloud/webfaction/webfaction_domain.py | 2 +- cloud/webfaction/webfaction_mailbox.py | 2 +- cloud/webfaction/webfaction_site.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index dec5f8e5d5e..05b31f55a4a 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -72,7 +72,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(required=False, default='present'), + state = dict(required=False, choices=['present', 'absent'], default='present'), type = dict(required=True), autostart = dict(required=False, choices=BOOLEANS, default=False), extra_info = dict(required=False, default=""), diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index fc522439591..784477c5409 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -68,7 +68,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(required=False, default='present'), + state = dict(required=False, choices=['present', 'absent'], default='present'), # You can specify an IP address or hostname. type = dict(required=True), password = dict(required=False, default=None), diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index 31339014e6c..8548c4fba37 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -57,7 +57,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(required=False, default='present'), + state = dict(required=False, choices=['present', 'absent'], default='present'), subdomains = dict(required=False, default=[]), login_name = dict(required=True), login_password = dict(required=True), diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index 5eb82df3eaa..fee5700e50e 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -56,7 +56,7 @@ def main(): argument_spec=dict( mailbox_name=dict(required=True), mailbox_password=dict(required=True), - state=dict(required=False, default='present'), + state=dict(required=False, choices=['present', 'absent'], default='present'), login_name=dict(required=True), login_password=dict(required=True), ), diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py index c981a21fc2b..a5be4f5407b 100644 --- a/cloud/webfaction/webfaction_site.py +++ b/cloud/webfaction/webfaction_site.py @@ -90,7 +90,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(required=False, default='present'), + state = dict(required=False, choices=['present', 'absent'], default='present'), # You can specify an IP address or hostname. 
host = dict(required=True), https = dict(required=False, choices=BOOLEANS, default=False), From 3645b61f46ae2e4a436401735bb4a4227516e3b5 Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Sun, 10 May 2015 22:07:49 +0100 Subject: [PATCH 192/720] Add examples. --- cloud/webfaction/webfaction_app.py | 10 ++++++++++ cloud/webfaction/webfaction_domain.py | 20 ++++++++++++++++++++ cloud/webfaction/webfaction_mailbox.py | 10 ++++++++++ 3 files changed, 40 insertions(+) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index 05b31f55a4a..20e94a7b5f6 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -63,6 +63,16 @@ options: required: true ''' +EXAMPLES = ''' + - name: Create a test app + webfaction_app: + name="my_wsgi_app1" + state=present + type=mod_wsgi35-python27 + login_name={{webfaction_user}} + login_password={{webfaction_passwd}} +''' + import xmlrpclib webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index 8548c4fba37..c99a0f23f6d 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -47,6 +47,26 @@ options: required: true ''' +EXAMPLES = ''' + - name: Create a test domain + webfaction_domain: + name: mydomain.com + state: present + subdomains: + - www + - blog + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" + + - name: Delete test domain and any subdomains + webfaction_domain: + name: mydomain.com + state: absent + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" + +''' + import socket import xmlrpclib diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index fee5700e50e..87ca1fd1a26 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -45,6 +45,16 @@ options: required: true ''' +EXAMPLES = ''' + - name: Create a mailbox + webfaction_mailbox: + mailbox_name="mybox" + mailbox_password="myboxpw" + state=present + login_name={{webfaction_user}} + login_password={{webfaction_passwd}} +''' + import socket import xmlrpclib From 0fbce5f9ddd2214ab7f8e5cc18779f6535a26452 Mon Sep 17 00:00:00 2001 From: Paul Bourdel Date: Sun, 10 May 2015 19:39:26 -0500 Subject: [PATCH 193/720] Changing from v1 to v2 hipchat api format. --- notification/hipchat.py | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/notification/hipchat.py b/notification/hipchat.py index 24fde9ecb35..ea81d2f55f4 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -58,13 +58,13 @@ options: description: - API url if using a self-hosted hipchat server required: false - default: 'https://api.hipchat.com/v1/rooms/message' + default: 'https://api.hipchat.com/v2/room/{id_or_name}/message' version_added: 1.6.0 # informational: requirements for nodes -requirements: [ urllib, urllib2 ] -author: WAKAYAMA Shirou +requirements: [ urllib, urllib2, requests, json ] +author: WAKAYAMA Shirou, BOURDEL Paul ''' EXAMPLES = ''' @@ -75,32 +75,27 @@ EXAMPLES = ''' # HipChat module specific support methods. 
# -MSG_URI = "https://api.hipchat.com/v1/rooms/message" +MSG_URI = "https://api.hipchat.com/v2/room/{id_or_name}/message" +NOTIFY_URI = "https://api.hipchat.com/v2/room/{id_or_name}/notification" def send_msg(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=MSG_URI): '''sending message to hipchat''' - params = {} - params['room_id'] = room - params['from'] = msg_from[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['api'] = api - + + payload = {'message': msg, 'color': color} + url_params = {'auth_token': token} if notify: - params['notify'] = 1 + POST_URL = NOTIFY_URI else: - params['notify'] = 0 + POST_URL = MSG_URI + + response = requests.post(POST_URL.replace('{id_or_name}',room), json=payload, params=url_params) - url = api + "?auth_token=%s" % (token) - data = urllib.urlencode(params) - response, info = fetch_url(module, url, data=data) - if info['status'] == 200: - return response.read() + if response.status_code == 201 or response.status_code == 204: + return response.json() else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + module.fail_json(msg="failed to send message, return status=%s" % str(response.status_code)) # =========================================== @@ -137,7 +132,7 @@ def main(): try: send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception, e: - module.fail_json(msg="unable to send msg: %s" % e) + module.fail_json(msg="unable to sent msg: %s" % e) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) @@ -145,5 +140,6 @@ # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * +import requests, json main() From f0508fa30ea6d98431dc2600792e333b86c67cd8 Mon Sep 17 00:00:00 2001 From: Jean-Fred Berthelot Date: Sat, 25 Apr 2015 12:56:35 +0100 Subject: [PATCH 194/720] Add check_mode support to HipChat module The HipChat module declares to support check_mode, but the message is sent in any case. With this, if executed in check mode, the module will exit before actually sending the message to HipChat. It will return changed=False, as per the convention for notifications modules.
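The guard itself is a one-liner; a minimal, self-contained sketch of the pattern described above (the argument spec here is illustrative, not the HipChat module's real one):

```python
from ansible.module_utils.basic import AnsibleModule

def main():
    # Illustrative module: only the check_mode handling mirrors the change described above.
    module = AnsibleModule(
        argument_spec=dict(msg=dict(required=True)),
        supports_check_mode=True,
    )
    if module.check_mode:
        # In check mode, exit before doing any real work and report changed=False,
        # per the convention for notification modules.
        module.exit_json(changed=False)
    # ... the real message would be sent here ...
    module.exit_json(changed=True, msg=module.params['msg'])

main()
```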
--- notification/hipchat.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/notification/hipchat.py b/notification/hipchat.py index 24fde9ecb35..060babf08d8 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -96,6 +96,11 @@ def send_msg(module, token, room, msg_from, msg, msg_format='text', url = api + "?auth_token=%s" % (token) data = urllib.urlencode(params) + + if module.check_mode: + # In check mode, exit before actually sending the message + module.exit_json(changed=False) + response, info = fetch_url(module, url, data=data) if info['status'] == 200: return response.read() @@ -119,8 +124,8 @@ def main(): "purple", "gray", "random"]), msg_format=dict(default="text", choices=["text", "html"]), notify=dict(default=True, type='bool'), - validate_certs = dict(default='yes', type='bool'), - api = dict(default=MSG_URI), + validate_certs=dict(default='yes', type='bool'), + api=dict(default=MSG_URI), ), supports_check_mode=True ) From e568032bc373b8328cc5faa81a3da0f893cb4c2d Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 8 May 2015 16:36:15 -0500 Subject: [PATCH 195/720] Fix up modules that have python24 syntax error --- .travis.yml | 2 +- database/vertica/vertica_user.py | 5 ++- files/patch.py | 2 +- monitoring/bigpanda.py | 2 +- monitoring/boundary_meter.py | 2 +- monitoring/librato_annotation.py | 4 +-- notification/slack.py | 4 +-- packaging/os/pkg5.py | 11 ++++-- packaging/os/pkg5_publisher.py | 13 ++++--- packaging/os/pkgng.py | 5 ++- packaging/os/portage.py | 4 ++- system/alternatives.py | 2 +- system/crypttab.py | 44 +++++++++++++++++------ system/gluster_volume.py | 4 +-- system/locale_gen.py | 54 +++++++++++++++++------------ web_infrastructure/ejabberd_user.py | 2 +- web_infrastructure/jira.py | 2 +- 17 files changed, 104 insertions(+), 58 deletions(-) diff --git a/.travis.yml b/.travis.yml index 62bbff5cd69..834a139e482 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,4 +7,4 @@ addons: packages: - python2.4 script: - - python2.4 -m compileall -fq -x 'cloud/' . + - python2.4 -m compileall -fq -x 'cloud/|zabbix|layman.py|maven_artifact.py' . 
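The file diffs that follow repeat a small set of rewrites so the affected modules still compile under Python 2.4; a condensed sketch of those conversions (the path and values in it are made up for illustration):

```python
import os

# 2.6+ conditional expression -> explicit if/else
# state = 'lock' if locked else 'unlock'
locked = True
if locked:
    state = 'lock'
else:
    state = 'unlock'

# 2.6+ "except ValueError as e" -> 2.4-compatible comma form
try:
    int('not-a-number')
except ValueError, e:
    error_text = str(e)

# 2.6+ "with open(...) as f" -> explicit open/close wrapped in try/finally
path = '/tmp/py24-demo.txt'
f = open(path, 'w')
try:
    f.write(state + '\n')
finally:
    f.close()

# 2.6+ octal literal 0o600 -> 2.4 octal literal 0600
os.chmod(path, 0600)
```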
diff --git a/database/vertica/vertica_user.py b/database/vertica/vertica_user.py index a011bf35adb..576e8b887ef 100644 --- a/database/vertica/vertica_user.py +++ b/database/vertica/vertica_user.py @@ -233,7 +233,10 @@ def present(user_facts, cursor, user, profile, resource_pool, changed = False query_fragments = ["alter user {0}".format(user)] if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'): - state = 'lock' if locked else 'unlock' + if locked: + state = 'lock' + else: + state = 'unlock' query_fragments.append("account {0}".format(state)) changed = True if password and password != user_facts[user_key]['password']: diff --git a/files/patch.py b/files/patch.py index ec3a3b02c00..e0d7d1d335d 100755 --- a/files/patch.py +++ b/files/patch.py @@ -110,7 +110,7 @@ def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_ru (rc, out, err) = patch_func(opts) if rc != 0: - msg = out if not err else err + msg = err or out raise PatchError(msg) diff --git a/monitoring/bigpanda.py b/monitoring/bigpanda.py index 11950287078..b1a45145ede 100644 --- a/monitoring/bigpanda.py +++ b/monitoring/bigpanda.py @@ -162,7 +162,7 @@ def main(): module.exit_json(changed=True, **deployment) else: module.fail_json(msg=json.dumps(info)) - except Exception as e: + except Exception, e: module.fail_json(msg=str(e)) # import module snippets diff --git a/monitoring/boundary_meter.py b/monitoring/boundary_meter.py index da739d4306f..f6d84328597 100644 --- a/monitoring/boundary_meter.py +++ b/monitoring/boundary_meter.py @@ -213,7 +213,7 @@ def download_request(module, name, apiid, apikey, cert_type): cert_file = open(cert_file_path, 'w') cert_file.write(body) cert_file.close - os.chmod(cert_file_path, 0o600) + os.chmod(cert_file_path, 0600) except: module.fail_json("Could not write to certificate file") diff --git a/monitoring/librato_annotation.py b/monitoring/librato_annotation.py index 63979f41bfb..161caebb07c 100644 --- a/monitoring/librato_annotation.py +++ b/monitoring/librato_annotation.py @@ -138,11 +138,11 @@ def post_annotation(module): headers = {} headers['Content-Type'] = 'application/json' - headers['Authorization'] = b"Basic " + base64.b64encode(user + b":" + api_key).strip() + headers['Authorization'] = "Basic " + base64.b64encode(user + ":" + api_key).strip() req = urllib2.Request(url, json_body, headers) try: response = urllib2.urlopen(req) - except urllib2.HTTPError as e: + except urllib2.HTTPError, e: module.fail_json(msg="Request Failed", reason=e.reason) response = response.read() module.exit_json(changed=True, annotation=response) diff --git a/notification/slack.py b/notification/slack.py index 7e8a81c811b..19af9d9d093 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -141,9 +141,9 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoj else: payload = dict(attachments=[dict(text=text, color=color)]) if channel is not None: - if (channel[0] == '#') or (channel[0] == '@') + if (channel[0] == '#') or (channel[0] == '@'): payload['channel'] = channel - else + else: payload['channel'] = '#'+channel if username is not None: payload['username'] = username diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index b250a02850c..6adbff7f331 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -128,13 +128,18 @@ def ensure(module, state, packages, params): }, } + if params['accept_licenses']: + accept_licenses = ['--accept'] + else: + accept_licenses = [] + to_modify = filter(behaviour[state]['filter'], 
packages) if to_modify: rc, out, err = module.run_command( [ 'pkg', behaviour[state]['subcommand'] ] - + (['--accept'] if params['accept_licenses'] else []) + + accept_licenses + [ '-q', '--' ] + to_modify @@ -151,12 +156,12 @@ def ensure(module, state, packages, params): def is_installed(module, package): rc, out, err = module.run_command(['pkg', 'list', '--', package]) - return True if rc == 0 else False + return not bool(int(rc)) def is_latest(module, package): rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package]) - return True if rc == 1 else False + return bool(int(rc)) from ansible.module_utils.basic import * diff --git a/packaging/os/pkg5_publisher.py b/packaging/os/pkg5_publisher.py index 63c62059203..53d7ad821f2 100644 --- a/packaging/os/pkg5_publisher.py +++ b/packaging/os/pkg5_publisher.py @@ -122,10 +122,15 @@ def set_publisher(module, params): args.append('--remove-mirror=*') args.extend(['--add-mirror=' + u for u in params['mirror']]) - if params['sticky'] != None: - args.append('--sticky' if params['sticky'] else '--non-sticky') - if params['enabled'] != None: - args.append('--enable' if params['enabled'] else '--disable') + if params['sticky'] != None and params['sticky']: + args.append('--sticky') + elif params['sticky'] != None: + args.append('--non-sticky') + + if params['enabled'] != None and params['enabled']: + args.append('--enable') + elif params['enabled'] != None: + args.append('--disable') rc, out, err = module.run_command( ["pkg", "set-publisher"] + args + [name], diff --git a/packaging/os/pkgng.py b/packaging/os/pkgng.py index 1aa8e0c737f..c54affbee22 100644 --- a/packaging/os/pkgng.py +++ b/packaging/os/pkgng.py @@ -252,9 +252,8 @@ def annotate_packages(module, pkgng_path, packages, annotation): for package in packages: for _annotation in annotations: - annotate_c += ( 1 if operation[_annotation['operation']]( - module, pkgng_path, package, - _annotation['tag'], _annotation['value']) else 0 ) + if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value']): + annotate_c += 1 if annotate_c > 0: return (True, "added %s annotations." 
% annotate_c) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index eb77baa14f6..124008c6522 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -422,7 +422,9 @@ def main(): if not p['package']: module.exit_json(msg='Sync successfully finished.') - packages = p['package'].split(',') if p['package'] else [] + packages = [] + if p['package']: + packages.extend(p['package'].split(',')) if p['depclean']: if packages and p['state'] not in portage_absent_states: diff --git a/system/alternatives.py b/system/alternatives.py index ff4de59cf11..62669db9b14 100755 --- a/system/alternatives.py +++ b/system/alternatives.py @@ -132,7 +132,7 @@ def main(): ) module.exit_json(changed=True) - except subprocess.CalledProcessError as cpe: + except subprocess.CalledProcessError, cpe: module.fail_json(msg=str(dir(cpe))) else: module.exit_json(changed=False) diff --git a/system/crypttab.py b/system/crypttab.py index ccd4102c66b..5621190244a 100644 --- a/system/crypttab.py +++ b/system/crypttab.py @@ -155,8 +155,9 @@ def main(): if changed and not module.check_mode: - with open(path, 'wb') as f: - f.write(str(crypttab)) + f = open(path, 'wb') + f.write(str(crypttab)) + f.close() module.exit_json(changed=changed, msg=reason, **module.params) @@ -172,9 +173,10 @@ class Crypttab(object): os.makedirs(os.path.dirname(path)) open(path,'a').close() - with open(path, 'r') as f: - for line in f.readlines(): - self._lines.append(Line(line)) + f = open(path, 'r') + for line in f.readlines(): + self._lines.append(Line(line)) + f.close() def add(self, line): self._lines.append(line) @@ -242,10 +244,19 @@ class Line(object): def _split_line(self, line): fields = line.split() + try: + field2 = fields[2] + except IndexError: + field2 = None + try: + field3 = fields[3] + except IndexError: + field3 = None + return (fields[0], fields[1], - fields[2] if len(fields) >= 3 else None, - fields[3] if len(fields) >= 4 else None) + field2, + field3) def remove(self): self.line, self.name, self.backing_device = '', None, None @@ -260,7 +271,10 @@ class Line(object): if self.valid(): fields = [self.name, self.backing_device] if self.password is not None or self.opts: - fields.append(self.password if self.password is not None else 'none') + if self.password is not None: + fields.append(self.password) + else: + fields.append('none') if self.opts: fields.append(str(self.opts)) return ' '.join(fields) @@ -276,7 +290,10 @@ class Options(dict): if opts_string is not None: for opt in opts_string.split(','): kv = opt.split('=') - k, v = (kv[0], kv[1]) if len(kv) > 1 else (kv[0], None) + if len(kv) > 1: + k, v = (kv[0], kv[1]) + else: + k, v = (kv[0], None) self[k] = v def add(self, opts_string): @@ -324,8 +341,13 @@ class Options(dict): and sorted(self.items()) == sorted(obj.items())) def __str__(self): - return ','.join([k if v is None else '%s=%s' % (k, v) - for k, v in self.items()]) + ret = [] + for k, v in self.items(): + if v is None: + ret.append(k) + else: + ret.append('%s=%s' % (k, v)) + return ','.join(ret) # import module snippets from ansible.module_utils.basic import * diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 2a8bc74df72..04a75d93ce0 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -145,7 +145,7 @@ def run_gluster(gargs, **kwargs): try: rc, out, err = module.run_command(args, **kwargs) if rc != 0: - module.fail_json(msg='error 
running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err)) except Exception, e: module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e))) return out @@ -167,7 +167,7 @@ def run_gluster_yes(gargs): args.extend(gargs) rc, out, err = module.run_command(args, data='y\n') if rc != 0: - module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) + module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err)) return out def get_peers(): diff --git a/system/locale_gen.py b/system/locale_gen.py index c5943cd63a0..ea40b598d4e 100644 --- a/system/locale_gen.py +++ b/system/locale_gen.py @@ -55,11 +55,12 @@ def is_available(name, ubuntuMode): __locales_available = '/etc/locale.gen' re_compiled = re.compile(__regexp) - with open(__locales_available, 'r') as fd: - for line in fd: - result = re_compiled.match(line) - if result and result.group('locale') == name: - return True + fd = open(__locales_available, 'r') + for line in fd: + result = re_compiled.match(line) + if result and result.group('locale') == name: + return True + fd.close() return False def is_present(name): @@ -76,10 +77,12 @@ def fix_case(name): def replace_line(existing_line, new_line): """Replaces lines in /etc/locale.gen""" - with open("/etc/locale.gen", "r") as f: - lines = [line.replace(existing_line, new_line) for line in f] - with open("/etc/locale.gen", "w") as f: - f.write("".join(lines)) + f = open("/etc/locale.gen", "r") + lines = [line.replace(existing_line, new_line) for line in f] + f.close() + f = open("/etc/locale.gen", "w") + f.write("".join(lines)) + f.close() def set_locale(name, enabled=True): """ Sets the state of the locale. Defaults to enabled. """ @@ -88,10 +91,12 @@ def set_locale(name, enabled=True): new_string = '%s \g' % (name) else: new_string = '# %s \g' % (name) - with open("/etc/locale.gen", "r") as f: - lines = [re.sub(search_string, new_string, line) for line in f] - with open("/etc/locale.gen", "w") as f: - f.write("".join(lines)) + f = open("/etc/locale.gen", "r") + lines = [re.sub(search_string, new_string, line) for line in f] + f.close() + f = open("/etc/locale.gen", "w") + f.write("".join(lines)) + f.close() def apply_change(targetState, name): """Create or remove locale. @@ -124,13 +129,15 @@ def apply_change_ubuntu(targetState, name): localeGenExitValue = call(["locale-gen", name]) else: # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales. - with open("/var/lib/locales/supported.d/local", "r") as f: - content = f.readlines() - with open("/var/lib/locales/supported.d/local", "w") as f: - for line in content: - locale, charset = line.split(' ') - if locale != name: - f.write(line) + f = open("/var/lib/locales/supported.d/local", "r") + content = f.readlines() + f.close() + f = open("/var/lib/locales/supported.d/local", "w") + for line in content: + locale, charset = line.split(' ') + if locale != name: + f.write(line) + f.close() # Purge locales and regenerate. # Please provide a patch if you know how to avoid regenerating the locales to keep! 
localeGenExitValue = call(["locale-gen", "--purge"]) @@ -168,7 +175,10 @@ def main(): module.fail_json(msg="The locales you've entered is not available " "on your system.") - prev_state = "present" if is_present(name) else "absent" + if is_present(name): + prev_state = "present" + else: + prev_state = "absent" changed = (prev_state!=state) if module.check_mode: @@ -180,7 +190,7 @@ def main(): apply_change(state, name) else: apply_change_ubuntu(state, name) - except EnvironmentError as e: + except EnvironmentError, e: module.fail_json(msg=e.strerror, exitValue=e.errno) module.exit_json(name=name, changed=changed, msg="OK") diff --git a/web_infrastructure/ejabberd_user.py b/web_infrastructure/ejabberd_user.py index d8b0384679c..bc54351e657 100755 --- a/web_infrastructure/ejabberd_user.py +++ b/web_infrastructure/ejabberd_user.py @@ -113,7 +113,7 @@ class EjabberdUser(object): (rc, out, err) = self.run_command('check_account', options) except EjabberdUserException, e: (rc, out, err) = (1, None, "required attribute(s) missing") - return True if rc == 0 else False + return not bool(int(rc)) def log(self, entry): """ This method will log information to the local syslog facility """ diff --git a/web_infrastructure/jira.py b/web_infrastructure/jira.py index 950fc3dbfcf..437a21b6769 100644 --- a/web_infrastructure/jira.py +++ b/web_infrastructure/jira.py @@ -335,7 +335,7 @@ def main(): ret = method(restbase, user, passwd, module.params) - except Exception as e: + except Exception, e: return module.fail_json(msg=e.message) From 265fefd7ec58ee4eac72eee999d0ed85bc067c65 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 11 May 2015 09:58:07 -0500 Subject: [PATCH 196/720] Give pushover a .py file extension --- notification/{pushover => pushover.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename notification/{pushover => pushover.py} (100%) diff --git a/notification/pushover b/notification/pushover.py similarity index 100% rename from notification/pushover rename to notification/pushover.py From 1b3b0caa5154ebe488f663bad2d53804528fc63d Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 11 May 2015 10:09:56 -0500 Subject: [PATCH 197/720] Give consul modules a .py extension --- clustering/{consul => consul.py} | 0 clustering/{consul_acl => consul_acl.py} | 0 clustering/{consul_kv => consul_kv.py} | 0 clustering/{consul_session => consul_session.py} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename clustering/{consul => consul.py} (100%) rename clustering/{consul_acl => consul_acl.py} (100%) rename clustering/{consul_kv => consul_kv.py} (100%) rename clustering/{consul_session => consul_session.py} (100%) diff --git a/clustering/consul b/clustering/consul.py similarity index 100% rename from clustering/consul rename to clustering/consul.py diff --git a/clustering/consul_acl b/clustering/consul_acl.py similarity index 100% rename from clustering/consul_acl rename to clustering/consul_acl.py diff --git a/clustering/consul_kv b/clustering/consul_kv.py similarity index 100% rename from clustering/consul_kv rename to clustering/consul_kv.py diff --git a/clustering/consul_session b/clustering/consul_session.py similarity index 100% rename from clustering/consul_session rename to clustering/consul_session.py From c8bd6635482cc622259e2e750fc12bf0284122c9 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 11 May 2015 10:20:54 -0500 Subject: [PATCH 198/720] Exclude consul modules from py24 syntax check --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.travis.yml b/.travis.yml index 834a139e482..f3f8329c76a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,4 +7,4 @@ addons: packages: - python2.4 script: - - python2.4 -m compileall -fq -x 'cloud/|zabbix|layman.py|maven_artifact.py' . + - python2.4 -m compileall -fq -x 'cloud/|zabbix|layman.py|maven_artifact.py|consul' . From a258606694e8b93ed77928263bf77b23014f604c Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 11 May 2015 10:29:28 -0500 Subject: [PATCH 199/720] Use try/finally with file opening to close the file --- system/crypttab.py | 18 +++++++++------ system/locale_gen.py | 54 +++++++++++++++++++++++++++----------------- 2 files changed, 44 insertions(+), 28 deletions(-) diff --git a/system/crypttab.py b/system/crypttab.py index 5621190244a..52f3e75576a 100644 --- a/system/crypttab.py +++ b/system/crypttab.py @@ -155,9 +155,11 @@ def main(): if changed and not module.check_mode: - f = open(path, 'wb') - f.write(str(crypttab)) - f.close() + try: + f = open(path, 'wb') + f.write(str(crypttab)) + finally: + f.close() module.exit_json(changed=changed, msg=reason, **module.params) @@ -173,10 +175,12 @@ class Crypttab(object): os.makedirs(os.path.dirname(path)) open(path,'a').close() - f = open(path, 'r') - for line in f.readlines(): - self._lines.append(Line(line)) - f.close() + try: + f = open(path, 'r') + for line in f.readlines(): + self._lines.append(Line(line)) + finally: + f.close() def add(self, line): self._lines.append(line) diff --git a/system/locale_gen.py b/system/locale_gen.py index ea40b598d4e..c4b2af7dc1b 100644 --- a/system/locale_gen.py +++ b/system/locale_gen.py @@ -77,12 +77,16 @@ def fix_case(name): def replace_line(existing_line, new_line): """Replaces lines in /etc/locale.gen""" - f = open("/etc/locale.gen", "r") - lines = [line.replace(existing_line, new_line) for line in f] - f.close() - f = open("/etc/locale.gen", "w") - f.write("".join(lines)) - f.close() + try: + f = open("/etc/locale.gen", "r") + lines = [line.replace(existing_line, new_line) for line in f] + finally: + f.close() + try: + f = open("/etc/locale.gen", "w") + f.write("".join(lines)) + finally: + f.close() def set_locale(name, enabled=True): """ Sets the state of the locale. Defaults to enabled. """ @@ -91,12 +95,16 @@ def set_locale(name, enabled=True): new_string = '%s \g' % (name) else: new_string = '# %s \g' % (name) - f = open("/etc/locale.gen", "r") - lines = [re.sub(search_string, new_string, line) for line in f] - f.close() - f = open("/etc/locale.gen", "w") - f.write("".join(lines)) - f.close() + try: + f = open("/etc/locale.gen", "r") + lines = [re.sub(search_string, new_string, line) for line in f] + finally: + f.close() + try: + f = open("/etc/locale.gen", "w") + f.write("".join(lines)) + finally: + f.close() def apply_change(targetState, name): """Create or remove locale. @@ -129,15 +137,19 @@ def apply_change_ubuntu(targetState, name): localeGenExitValue = call(["locale-gen", name]) else: # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales. 
- f = open("/var/lib/locales/supported.d/local", "r") - content = f.readlines() - f.close() - f = open("/var/lib/locales/supported.d/local", "w") - for line in content: - locale, charset = line.split(' ') - if locale != name: - f.write(line) - f.close() + try: + f = open("/var/lib/locales/supported.d/local", "r") + content = f.readlines() + finally: + f.close() + try: + f = open("/var/lib/locales/supported.d/local", "w") + for line in content: + locale, charset = line.split(' ') + if locale != name: + f.write(line) + finally: + f.close() # Purge locales and regenerate. # Please provide a patch if you know how to avoid regenerating the locales to keep! localeGenExitValue = call(["locale-gen", "--purge"]) From d97ffb3f58dd3ea66768d03d0108d2dedf35a4ac Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 11 May 2015 10:37:41 -0500 Subject: [PATCH 200/720] Be more explicit with exclusions --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index f3f8329c76a..daf83b67931 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,4 +7,4 @@ addons: packages: - python2.4 script: - - python2.4 -m compileall -fq -x 'cloud/|zabbix|layman.py|maven_artifact.py|consul' . + - python2.4 -m compileall -fq -x 'cloud/|monitoring/zabbix.*\.py|/layman.py|/maven_artifact.py|clustering/consul.*\.py' . From 92c6f64adfea070a7183afb8cbae8d3dc1e9591c Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 11 May 2015 17:41:18 +0200 Subject: [PATCH 201/720] cloudstack: add new module cs_account --- cloud/cloudstack/cs_account.py | 408 +++++++++++++++++++++++++++++++++ 1 file changed, 408 insertions(+) create mode 100644 cloud/cloudstack/cs_account.py diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py new file mode 100644 index 00000000000..cd6a1228807 --- /dev/null +++ b/cloud/cloudstack/cs_account.py @@ -0,0 +1,408 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_account +short_description: Manages account on Apache CloudStack based clouds. +description: + - Create, disable, lock, enable and remove accounts. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of account. + required: true + username: + description: + - Username of the user to be created if account did not exist. + - Required on C(state=present). + required: false + default: null + password: + description: + - Password of the user to be created if account did not exist. + - Required on C(state=present). + required: false + default: null + first_name: + description: + - First name of the user to be created if account did not exist. + - Required on C(state=present). + required: false + default: null + last_name: + description: + - Last name of the user to be created if account did not exist. + - Required on C(state=present). 
+ required: false + default: null + email: + description: + - Email of the user to be created if account did not exist. + - Required on C(state=present). + required: false + default: null + timezone: + description: + - Timezone of the user to be created if account did not exist. + required: false + default: null + network_domain: + description: + - Network domain of the account. + required: false + default: null + account_type: + description: + - Type of the account. + required: false + default: 'user' + choices: [ 'user', 'root_admin', 'domain_admin' ] + domain: + description: + - Domain the account is related to. + required: false + default: 'ROOT' + state: + description: + - State of the account. + required: false + default: 'present' + choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked' ] + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +--- +# create an account in domain 'CUSTOMERS' +local_action: + module: cs_account + name: customer_xy + username: customer_xy + password: S3Cur3 + last_name: Doe + first_name: John + email: john.doe@example.com + domain: CUSTOMERS + + +# Lock an existing account in domain 'CUSTOMERS' +local_action: + module: cs_account + name: customer_xy + domain: CUSTOMERS + state: locked + + +# Disable an existing account in domain 'CUSTOMERS' +local_action: + module: cs_account + name: customer_xy + domain: CUSTOMERS + state: disabled + + +# Enable an existing account in domain 'CUSTOMERS' +local_action: + module: cs_account + name: customer_xy + domain: CUSTOMERS + state: enabled + + +# Remove an account in domain 'CUSTOMERS' +local_action: + module: cs_account + name: customer_xy + domain: CUSTOMERS + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the account. + returned: success + type: string + sample: linus@example.com +account_type: + description: Type of the account. + returned: success + type: string + sample: user +account_state: + description: State of the account. + returned: success + type: string + sample: enabled +network_domain: + description: Network domain of the account. + returned: success + type: string + sample: example.local +domain: + description: Domain the account is related. 
+ returned: success + type: string + sample: ROOT +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackAccount(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.account = None + self.account_types = { + 'user': 0, + 'root_admin': 1, + 'domain_admin': 2, + } + + + def get_account_type(self): + account_type = self.module.params.get('account_type') + return self.account_types[account_type] + + + def get_account(self): + if not self.account: + args = {} + args['listall'] = True + args['domainid'] = self.get_domain('id') + accounts = self.cs.listAccounts(**args) + if accounts: + account_name = self.module.params.get('name') + for a in accounts['account']: + if account_name in [ a['name'] ]: + self.account = a + break + + return self.account + + + def enable_account(self): + account = self.get_account() + if not account: + self.module.fail_json(msg="Failed: account not present") + + if account['state'].lower() != 'enabled': + self.result['changed'] = True + args = {} + args['id'] = account['id'] + args['account'] = self.module.params.get('name') + args['domainid'] = self.get_domain('id') + if not self.module.check_mode: + res = self.cs.enableAccount(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + account = res['account'] + return account + + + def lock_account(self): + return self.lock_or_disable_account(lock=True) + + + def disable_account(self): + return self.lock_or_disable_account() + + + def lock_or_disable_account(self, lock=False): + account = self.get_account() + if not account: + self.module.fail_json(msg="Failed: account not present") + + # we need to enable the account to lock it. 
+ if lock and account['state'].lower() == 'disabled': + account = self.enable_account() + + if lock and account['state'].lower() != 'locked' \ + or not lock and account['state'].lower() != 'disabled': + self.result['changed'] = True + args = {} + args['id'] = account['id'] + args['account'] = self.module.params.get('name') + args['domainid'] = self.get_domain('id') + args['lock'] = lock + if not self.module.check_mode: + account = self.cs.disableAccount(**args) + + if 'errortext' in account: + self.module.fail_json(msg="Failed: '%s'" % account['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + account = self._poll_job(account, 'account') + return account + + + def present_account(self): + missing_params = [] + + if not self.module.params.get('email'): + missing_params.append('email') + + if not self.module.params.get('username'): + missing_params.append('username') + + if not self.module.params.get('password'): + missing_params.append('password') + + if not self.module.params.get('first_name'): + missing_params.append('first_name') + + if not self.module.params.get('last_name'): + missing_params.append('last_name') + + if missing_params: + self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params)) + + account = self.get_account() + + if not account: + self.result['changed'] = True + + args = {} + args['account'] = self.module.params.get('name') + args['domainid'] = self.get_domain('id') + args['accounttype'] = self.get_account_type() + args['networkdomain'] = self.module.params.get('network_domain') + args['username'] = self.module.params.get('username') + args['password'] = self.module.params.get('password') + args['firstname'] = self.module.params.get('first_name') + args['lastname'] = self.module.params.get('last_name') + args['email'] = self.module.params.get('email') + args['timezone'] = self.module.params.get('timezone') + if not self.module.check_mode: + res = self.cs.createAccount(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + account = res['account'] + return account + + + def absent_account(self): + account = self.get_account() + if account: + self.result['changed'] = True + + if not self.module.check_mode: + res = self.cs.deleteAccount(id=account['id']) + + if 'errortext' in account: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self._poll_job(res, 'account') + return account + + + def get_result(self, account): + if account: + if 'name' in account: + self.result['name'] = account['name'] + if 'accounttype' in account: + for key,value in self.account_types.items(): + if value == account['accounttype']: + self.result['account_type'] = key + break + if 'state' in account: + self.result['account_state'] = account['state'] + if 'domain' in account: + self.result['domain'] = account['domain'] + if 'networkdomain' in account: + self.result['network_domain'] = account['networkdomain'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked' ], default='present'), + account_type = dict(choices=['user', 'root_admin', 'domain_admin'], default='user'), + network_domain = dict(default=None), + domain = dict(default='ROOT'), + email = dict(default=None), + first_name = dict(default=None), + last_name = dict(default=None), + username = 
dict(default=None), + password = dict(default=None), + timezone = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_acc = AnsibleCloudStackAccount(module) + + state = module.params.get('state') + + if state in ['absent']: + account = acs_acc.absent_account() + + elif state in ['enabled']: + account = acs_acc.enable_account() + + elif state in ['disabled']: + account = acs_acc.disable_account() + + elif state in ['locked']: + account = acs_acc.lock_account() + + else: + account = acs_acc.present_account() + + result = acs_acc.get_result(account) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From a589a66710171bf9166b1b35699bb2baa286333e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 11 May 2015 10:09:56 -0700 Subject: [PATCH 202/720] Update module documentation if modules have a dep on python2.6+ --- cloud/amazon/cloudtrail.py | 4 +++- cloud/google/gce_img.py | 4 +++- cloud/lxc/lxc_container.py | 5 ++++- cloud/misc/ovirt.py | 4 +++- cloud/misc/virt.py | 4 +++- cloud/vmware/vmware_datacenter.py | 1 + 6 files changed, 17 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index b58bcd6e1d0..312b9b08c9c 100755 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -19,9 +19,11 @@ DOCUMENTATION = """ module: cloudtrail short_description: manage CloudTrail creation and deletion description: - - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. This module has a dependency on python-boto >= 2.21. + - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. version_added: "2.0" author: Ted Timmons +requirements: + - "boto >= 2.21" options: state: description: diff --git a/cloud/google/gce_img.py b/cloud/google/gce_img.py index 3b2351b3752..ae4c31dc4c5 100644 --- a/cloud/google/gce_img.py +++ b/cloud/google/gce_img.py @@ -78,7 +78,9 @@ options: default: null aliases: [] -requirements: [ "libcloud" ] +requirements: + - "python >= 2.6" + - "apache-libcloud" author: Peter Tan ''' diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 5f0f6bb2ad6..7e718608287 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -149,7 +149,10 @@ options: description: - list of 'key=value' options to use when configuring a container. required: false -requirements: ['lxc >= 1.0', 'python2-lxc >= 0.1'] +requirements: + - 'lxc >= 1.0' + - 'python >= 2.6' + - 'python2-lxc >= 0.1' notes: - Containers must have a unique name. If you attempt to create a container with a name that already exists in the users namespace the module will diff --git a/cloud/misc/ovirt.py b/cloud/misc/ovirt.py index 2d54ad3f401..258637ddaec 100755 --- a/cloud/misc/ovirt.py +++ b/cloud/misc/ovirt.py @@ -152,7 +152,9 @@ options: aliases: [] choices: ['present', 'absent', 'shutdown', 'started', 'restarted'] -requirements: [ "ovirt-engine-sdk" ] +requirements: + - "python >= 2.6" + - "ovirt-engine-sdk-python" ''' EXAMPLES = ''' # Basic example provisioning from image. 
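Alongside documenting interpreter and library requirements, these modules typically guard optional imports at runtime as well, as cs_account does above with its has_lib_cs flag; a small hypothetical sketch of that pattern (the library name somelib is a placeholder, not a real dependency of any module in this series):

```python
# Sketch: fail cleanly when a documented Python dependency is missing.
try:
    import somelib
    HAS_SOMELIB = True
except ImportError:
    HAS_SOMELIB = False

def main():
    module = AnsibleModule(argument_spec=dict(name=dict(required=True)))
    if not HAS_SOMELIB:
        module.fail_json(msg="python library somelib required: pip install somelib")
    module.exit_json(changed=False, name=module.params['name'])

# import module snippets
from ansible.module_utils.basic import *
main()
```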
diff --git a/cloud/misc/virt.py b/cloud/misc/virt.py index f1d36fc1964..a2567f586a9 100644 --- a/cloud/misc/virt.py +++ b/cloud/misc/virt.py @@ -55,7 +55,9 @@ options: - XML document used with the define command required: false default: null -requirements: [ "libvirt" ] +requirements: + - "python >= 2.6" + - "libvirt-python" author: Michael DeHaan, Seth Vidal ''' diff --git a/cloud/vmware/vmware_datacenter.py b/cloud/vmware/vmware_datacenter.py index 35cf7fa4692..10a6f493bf8 100644 --- a/cloud/vmware/vmware_datacenter.py +++ b/cloud/vmware/vmware_datacenter.py @@ -29,6 +29,7 @@ author: Joseph Callen notes: - Tested on vSphere 5.5 requirements: + - "python >= 2.6" - PyVmomi options: hostname: From 752442e8ff90520e1603486df0e97d823ba4252d Mon Sep 17 00:00:00 2001 From: "@RubenKelevra" Date: Mon, 11 May 2015 23:58:08 +0200 Subject: [PATCH 203/720] change -Syy to -Sy -Syy do always update, which is not needed, since the caches might already be up to date. --- packaging/os/pacman.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/os/pacman.py b/packaging/os/pacman.py index a91f8e3054d..0852fe56ea7 100644 --- a/packaging/os/pacman.py +++ b/packaging/os/pacman.py @@ -76,7 +76,7 @@ EXAMPLES = ''' # Recursively remove package baz - pacman: name=baz state=absent recurse=yes -# Run the equivalent of "pacman -Syy" as a separate step +# Run the equivalent of "pacman -Sy" as a separate step - pacman: update_cache=yes ''' @@ -122,7 +122,7 @@ def query_package(module, name, state="present"): def update_package_db(module): - cmd = "pacman -Syy" + cmd = "pacman -Sy" rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 0: From 838cd4123bcf573c99cf7e4c54a671bf136139f7 Mon Sep 17 00:00:00 2001 From: Lorenzo Luconi Trombacchi Date: Tue, 12 May 2015 10:56:22 +0200 Subject: [PATCH 204/720] added lower function for statuses --- monitoring/monit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/monit.py b/monitoring/monit.py index 8772d22b2d8..fcb55587c2e 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -77,7 +77,7 @@ def main(): # Process 'name' Running - restart pending parts = line.split() if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name: - return ' '.join(parts[2:]) + return ' '.join(parts[2:]).lower() else: return '' From 55b9ab277493eac80cdf0eafcfde28149d60452f Mon Sep 17 00:00:00 2001 From: Lorenzo Luconi Trombacchi Date: Tue, 12 May 2015 10:58:47 +0200 Subject: [PATCH 205/720] fix a problem with status detection after unmonitor command --- monitoring/monit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/monit.py b/monitoring/monit.py index fcb55587c2e..69a0eed11c9 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -119,7 +119,7 @@ def main(): if module.check_mode: module.exit_json(changed=True) status = run_command('unmonitor') - if status in ['not monitored']: + if status in ['not monitored'] or 'unmonitor pending' in status: module.exit_json(changed=True, name=name, state=state) module.fail_json(msg='%s process not unmonitored' % name, status=status) From 1f9f9a549ecbde78efdc0cafa25dd589f64b8a68 Mon Sep 17 00:00:00 2001 From: Lorenzo Luconi Trombacchi Date: Tue, 12 May 2015 11:07:52 +0200 Subject: [PATCH 206/720] status function was called twice --- monitoring/monit.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/monitoring/monit.py b/monitoring/monit.py index 69a0eed11c9..f20ba706bea 100644 --- 
a/monitoring/monit.py +++ b/monitoring/monit.py @@ -86,7 +86,8 @@ def main(): module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True) return status() - present = status() != '' + process_status = status() + present = process_status != '' if not present and not state == 'present': module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state) @@ -102,7 +103,7 @@ def main(): module.exit_json(changed=True, name=name, state=state) module.exit_json(changed=False, name=name, state=state) - running = 'running' in status() + running = 'running' in process_status if running and state in ['started', 'monitored']: module.exit_json(changed=False, name=name, state=state) From 9b32a5d8bf345b7cee3609573c0ebcdba69f8b2f Mon Sep 17 00:00:00 2001 From: Chris Long Date: Tue, 12 May 2015 22:10:53 +1000 Subject: [PATCH 207/720] Initial commit of nmcli: NetworkManager module. Currently supports: Create, modify, remove of - team, team-slave, bond, bond-slave, ethernet TODO: vlan, bridge, wireless related connections. --- network/nmcli.py | 1089 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1089 insertions(+) create mode 100644 network/nmcli.py diff --git a/network/nmcli.py b/network/nmcli.py new file mode 100644 index 00000000000..0532058da3b --- /dev/null +++ b/network/nmcli.py @@ -0,0 +1,1089 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Chris Long +# +# This file is a module for Ansible that interacts with Network Manager +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION=''' +--- +module: nmcli +author: Chris Long +short_description: Manage Networking +requirements: [ nmcli, dbus ] +description: + - Manage the network devices. Create, modify, and manage, ethernet, teams, bonds, vlans etc. +options: + state: + required: True + default: "present" + choices: [ present, absent ] + description: + - Whether the device should exist or not, taking action if the state is different from what is stated. + enabled: + required: False + default: "yes" + choices: [ "yes", "no" ] + description: + - Whether the service should start on boot. B(At least one of state and enabled are required.) + - Whether the connection profile can be automatically activated ( default: yes) + action: + required: False + default: None + choices: [ add, modify, show, up, down ] + description: + - Set to 'add' if you want to add a connection. + - Set to 'modify' if you want to modify a connection. Modify one or more properties in the connection profile. + - Set to 'delete' if you want to delete a connection. Delete a configured connection. The connection to be deleted is identified by its name 'cfname'. + - Set to 'show' if you want to show a connection. Will show all devices unless 'cfname' is set. + - Set to 'up' if you want to bring a connection up. Requires 'cfname' to be set. + - Set to 'down' if you want to bring a connection down. Requires 'cfname' to be set. 
+ cname: + required: True + default: None + description: + - Where CNAME will be the name used to call the connection. when not provided a default name is generated: [-][-] + ifname: + required: False + default: cname + description: + - Where INAME will be the what we call the interface name. Required with 'up', 'down' modifiers. + - interface to bind the connection to. The connection will only be applicable to this interface name. + - A special value of "*" can be used for interface-independent connections. + - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan. + type: + required: False + choices: [ ethernet, team, team-slave, bond, bond-slave, bridge, vlan ] + description: + - This is the type of device or network connection that you wish to create. + mode: + required: False + choices: [ "balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb" ] + default: None + description: + - This is the type of device or network connection that you wish to create for a bond, team or bridge. (NetworkManager default: balance-rr) + master: + required: False + default: None + description: + - master ] STP forwarding delay, in seconds (NetworkManager default: 15) + hellotime: + required: False + default: None + description: + - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds (NetworkManager default: 2) + maxage: + required: False + default: None + description: + - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds (NetworkManager default: 20) + ageingtime: + required: False + default: None + description: + - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds (NetworkManager default: 300) + mac: + required: False + default: None + description: + - This is only used with bridge - MAC address of the bridge (note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel) + slavepriority: + required: False + default: None + description: + - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave (default: 32) + path_cost: + required: False + default: None + description: + - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave (NetworkManager default: 100) + hairpin: + required: False + default: None + description: + - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame was received on. (NetworkManager default: yes) + vlanid: + required: False + default: None + description: + - This is only used with VLAN - VLAN ID in range <0-4095> + vlandev: + required: False + default: None + description: + - This is only used with VLAN - parent device this VLAN is on, can use ifname + flags: + required: False + default: None + description: + - This is only used with VLAN - flags + ingress: + required: False + default: None + description: + - This is only used with VLAN - VLAN ingress priority mapping + egress: + required: False + default: None + description: + - This is only used with VLAN - VLAN egress priority mapping + +''' + +EXAMPLES=''' +The following examples are working examples that I have run in the field. 
I followed follow the structure: +``` +|_/inventory/cloud-hosts +| /group_vars/openstack-stage.yml +| /host_vars/controller-01.openstack.host.com +| /host_vars/controller-02.openstack.host.com +|_/playbook/library/nmcli.py +| /playbook-add.yml +| /playbook-del.yml +``` + +## inventory examples +### groups_vars +```yml +--- +#devops_os_define_network +storage_gw: "192.168.0.254" +external_gw: "10.10.0.254" +tenant_gw: "172.100.0.254" + +#Team vars +nmcli_team: + - {cname: 'tenant', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"} + - {cname: 'external', ip4: "{{external_ip}}", gw4: "{{external_gw}}"} + - {cname: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"} +nmcli_team_slave: + - {cname: 'em1', ifname: 'em1', master: 'tenant'} + - {cname: 'em2', ifname: 'em2', master: 'tenant'} + - {cname: 'p2p1', ifname: 'p2p1', master: 'storage'} + - {cname: 'p2p2', ifname: 'p2p2', master: 'external'} + +#bond vars +nmcli_bond: + - {cname: 'tenant', ip4: "{{tenant_ip}}", gw4: '', mode: 'balance-rr'} + - {cname: 'external', ip4: "{{external_ip}}", gw4: '', mode: 'balance-rr'} + - {cname: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}", mode: 'balance-rr'} +nmcli_bond_slave: + - {cname: 'em1', ifname: 'em1', master: 'tenant'} + - {cname: 'em2', ifname: 'em2', master: 'tenant'} + - {cname: 'p2p1', ifname: 'p2p1', master: 'storage'} + - {cname: 'p2p2', ifname: 'p2p2', master: 'external'} + +#ethernet vars +nmcli_ethernet: + - {cname: 'em1', ifname: 'em1', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"} + - {cname: 'em2', ifname: 'em2', ip4: "{{tenant_ip1}}", gw4: "{{tenant_gw}}"} + - {cname: 'p2p1', ifname: 'p2p1', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"} + - {cname: 'p2p2', ifname: 'p2p2', ip4: "{{external_ip}}", gw4: "{{external_gw}}"} +``` + +### host_vars +```yml +--- +storage_ip: "192.168.160.21/23" +external_ip: "10.10.152.21/21" +tenant_ip: "192.168.200.21/23" +``` + + + +## playbook-add.yml example + +```yml +--- +- hosts: openstack-stage + remote_user: root + tasks: + +- name: install needed network manager libs + yum: name={{ item }} state=installed + with_items: + - libnm-qt-devel.x86_64 + - nm-connection-editor.x86_64 + - libsemanage-python + - policycoreutils-python + +##### Working with all cloud nodes - Teaming + - name: try nmcli add team - cname only & ip4 gw4 + nmcli: type=team cname={{item.cname}} ip4={{item.ip4}} gw4={{item.gw4}} state=present + with_items: + - "{{nmcli_team}}" + + - name: try nmcli add teams-slave + nmcli: type=team-slave cname={{item.cname}} ifname={{item.ifname}} master={{item.master}} state=present + with_items: + - "{{nmcli_team_slave}}" + +###### Working with all cloud nodes - Bonding +# - name: try nmcli add bond - cname only & ip4 gw4 mode +# nmcli: type=bond cname={{item.cname}} ip4={{item.ip4}} gw4={{item.gw4}} mode={{item.mode}} state=present +# with_items: +# - "{{nmcli_bond}}" +# +# - name: try nmcli add bond-slave +# nmcli: type=bond-slave cname={{item.cname}} ifname={{item.ifname}} master={{item.master}} state=present +# with_items: +# - "{{nmcli_bond_slave}}" + +##### Working with all cloud nodes - Ethernet +# - name: nmcli add Ethernet - cname only & ip4 gw4 +# nmcli: type=ethernet cname={{item.cname}} ip4={{item.ip4}} gw4={{item.gw4}} state=present +# with_items: +# - "{{nmcli_ethernet}}" +``` + +## playbook-del.yml example + +```yml +--- +- hosts: openstack-stage + remote_user: root + tasks: + + - name: try nmcli del team - multiple + nmcli: cname={{item.cname}} state=absent + with_items: + - { cname: 'em1'} + - { cname: 'em2'} + - { 
cname: 'p1p1'} + - { cname: 'p1p2'} + - { cname: 'p2p1'} + - { cname: 'p2p2'} + - { cname: 'tenant'} + - { cname: 'storage'} + - { cname: 'external'} + - { cname: 'team-em1'} + - { cname: 'team-em2'} + - { cname: 'team-p1p1'} + - { cname: 'team-p1p2'} + - { cname: 'team-p2p1'} + - { cname: 'team-p2p2'} +``` +# To add an Ethernet connection with static IP configuration, issue a command as follows +- nmcli: cname=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present + +# To add an Team connection with static IP configuration, issue a command as follows +- nmcli: cname=my-team1 ifname=my-team1 type=team ip4=192.168.100.100/24 gw4=192.168.100.1 state=present enabled=yes + +# Optionally, at the same time specify IPv6 addresses for the device as follows: +- nmcli: cname=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 ip6=abbe::cafe gw6=2001:db8::1 state=present + +# To add two IPv4 DNS server addresses: +-nmcli: cname=my-eth1 dns4=["8.8.8.8", "8.8.4.4"] state=present + +# To make a profile usable for all compatible Ethernet interfaces, issue a command as follows +- nmcli: ctype=ethernet name=my-eth1 ifname="*" state=present + +# To change the property of a setting e.g. MTU, issue a command as follows: +- nmcli: cname=my-eth1 mtu=9000 state=present + + Exit Status's: + - nmcli exits with status 0 if it succeeds, a value greater than 0 is + returned if an error occurs. + - 0 Success - indicates the operation succeeded + - 1 Unknown or unspecified error + - 2 Invalid user input, wrong nmcli invocation + - 3 Timeout expired (see --wait option) + - 4 Connection activation failed + - 5 Connection deactivation failed + - 6 Disconnecting device failed + - 7 Connection deletion failed + - 8 NetworkManager is not running + - 9 nmcli and NetworkManager versions mismatch + - 10 Connection, device, or access point does not exist. +''' +# import ansible.module_utils.basic +import os +import syslog +import sys +import dbus +from gi.repository import NetworkManager, NMClient + + +class Nmcli(object): + """ + This is the generic nmcli manipulation class that is subclassed based on platform. + A subclass may wish to override the following action methods:- + - create_connection() + - delete_connection() + - modify_connection() + - show_connection() + - up_connection() + - down_connection() + All subclasses MUST define platform and distribution (which may be None). 
+ """ + + platform='Generic' + distribution=None + bus=dbus.SystemBus() + # The following is going to be used in dbus code + DEVTYPES={1: "Ethernet", + 2: "Wi-Fi", + 5: "Bluetooth", + 6: "OLPC", + 7: "WiMAX", + 8: "Modem", + 9: "InfiniBand", + 10: "Bond", + 11: "VLAN", + 12: "ADSL", + 13: "Bridge", + 14: "Generic", + 15: "Team" + } + STATES={0: "Unknown", + 10: "Unmanaged", + 20: "Unavailable", + 30: "Disconnected", + 40: "Prepare", + 50: "Config", + 60: "Need Auth", + 70: "IP Config", + 80: "IP Check", + 90: "Secondaries", + 100: "Activated", + 110: "Deactivating", + 120: "Failed" + } + + def __new__(cls, *args, **kwargs): + return load_platform_subclass(Nmcli, args, kwargs) + + def __init__(self, module): + self.module=module + self.state=module.params['state'] + self.enabled=module.params['enabled'] + self.action=module.params['action'] + self.cname=module.params['cname'] + self.master=module.params['master'] + self.autoconnect=module.params['autoconnect'] + self.ifname=module.params['ifname'] + self.type=module.params['type'] + self.ip4=module.params['ip4'] + self.gw4=module.params['gw4'] + self.dns4=module.params['dns4'] + self.ip6=module.params['ip6'] + self.gw6=module.params['gw6'] + self.dns6=module.params['dns6'] + self.mtu=module.params['mtu'] + self.stp=module.params['stp'] + self.priority=module.params['priority'] + self.mode=module.params['mode'] + self.miimon=module.params['miimon'] + self.downdelay=module.params['downdelay'] + self.updelay=module.params['updelay'] + self.arp_interval=module.params['arp_interval'] + self.arp_ip_target=module.params['arp_ip_target'] + self.slavepriority=module.params['slavepriority'] + self.forwarddelay=module.params['forwarddelay'] + self.hellotime=module.params['hellotime'] + self.maxage=module.params['maxage'] + self.ageingtime=module.params['ageingtime'] + self.mac=module.params['mac'] + self.vlanid=module.params['vlanid'] + self.vlandev=module.params['vlandev'] + self.flags=module.params['flags'] + self.ingress=module.params['ingress'] + self.egress=module.params['egress'] + # select whether we dump additional debug info through syslog + self.syslogging=True + + def execute_command(self, cmd, use_unsafe_shell=False, data=None): + if self.syslogging: + syslog.openlog('ansible-%s' % os.path.basename(__file__)) + syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) + + return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) + + def merge_secrets(self, proxy, config, setting_name): + try: + # returns a dict of dicts mapping name::setting, where setting is a dict + # mapping key::value. Each member of the 'setting' dict is a secret + secrets=proxy.GetSecrets(setting_name) + + # Copy the secrets into our connection config + for setting in secrets: + for key in secrets[setting]: + config[setting_name][key]=secrets[setting][key] + except Exception, e: + pass + + def dict_to_string(self, d): + # Try to trivially translate a dictionary's elements into nice string + # formatting. 
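+        # Handling below:
+        #  - dbus.Array values are flattened element by element (dbus.Byte as int, dbus.String as-is)
+        #  - nested dbus.Dictionary values are handled recursively and their output appended directly
+        #  - any other value is emitted unchanged, one "key: value" line per entry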
+ dstr="" + for key in d: + val=d[key] + str_val="" + add_string=True + if type(val)==type(dbus.Array([])): + for elt in val: + if type(elt)==type(dbus.Byte(1)): + str_val+="%s " % int(elt) + elif type(elt)==type(dbus.String("")): + str_val+="%s" % elt + elif type(val)==type(dbus.Dictionary({})): + dstr+=self.dict_to_string(val) + add_string=False + else: + str_val=val + if add_string: + dstr+="%s: %s\n" % ( key, str_val) + return dstr + + def connection_to_string(self, config): + # dump a connection configuration to use in list_connection_info + setting_list=[] + for setting_name in config: + setting_list.append(self.dict_to_string(config[setting_name])) + return setting_list + # print "" + + def list_connection_info(self): + # Ask the settings service for the list of connections it provides + bus=dbus.SystemBus() + + service_name="org.freedesktop.NetworkManager" + proxy=bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings") + settings=dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings") + connection_paths=settings.ListConnections() + connection_list=[] + # List each connection's name, UUID, and type + for path in connection_paths: + con_proxy=bus.get_object(service_name, path) + settings_connection=dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection") + config=settings_connection.GetSettings() + + # Now get secrets too; we grab the secrets for each type of connection + # (since there isn't a "get all secrets" call because most of the time + # you only need 'wifi' secrets or '802.1x' secrets, not everything) and + # merge that into the configuration data - To use at a later stage + self.merge_secrets(settings_connection, config, '802-11-wireless') + self.merge_secrets(settings_connection, config, '802-11-wireless-security') + self.merge_secrets(settings_connection, config, '802-1x') + self.merge_secrets(settings_connection, config, 'gsm') + self.merge_secrets(settings_connection, config, 'cdma') + self.merge_secrets(settings_connection, config, 'ppp') + + # Get the details of the 'connection' setting + s_con=config['connection'] + connection_list.append(s_con['id']) + connection_list.append(s_con['uuid']) + connection_list.append(s_con['type']) + connection_list.append(self.connection_to_string(config)) + return connection_list + + def connection_exists(self): + # we are going to use name and type in this instance to find if that connection exists and is of type x + connections=self.list_connection_info() + + for con_item in connections: + if self.cname==con_item: + return True + + def down_connection(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # if self.connection_exists(): + cmd.append('con') + cmd.append('down') + cmd.append(self.cname) + return self.execute_command(cmd) + + def up_connection(self): + cmd=[self.module.get_bin_path('nmcli', True)] + cmd.append('con') + cmd.append('up') + cmd.append(self.cname) + return self.execute_command(cmd) + + def create_connection_team(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating team interface + cmd.append('con') + cmd.append('add') + cmd.append('type') + cmd.append('team') + cmd.append('con-name') + if self.cname is not None: + cmd.append(self.cname) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.cname is not None: + cmd.append(self.cname) + if self.ip4 is not None: + cmd.append('ip4') + cmd.append(self.ip4) + if self.gw4 is not None: + 
cmd.append('gw4') + cmd.append(self.gw4) + if self.ip6 is not None: + cmd.append('ip6') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('gw6') + cmd.append(self.gw6) + if self.enabled is not None: + cmd.append('autoconnect') + cmd.append(self.enabled) + return cmd + + def modify_connection_team(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying team interface + cmd.append('con') + cmd.append('mod') + cmd.append(self.cname) + if self.ip4 is not None: + cmd.append('ipv4.address') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('ipv4.gateway') + cmd.append(self.gw4) + if self.dns4 is not None: + cmd.append('ipv4.dns') + cmd.append(self.dns4) + if self.ip6 is not None: + cmd.append('ipv6.address') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('ipv6.gateway') + cmd.append(self.gw4) + if self.dns6 is not None: + cmd.append('ipv6.dns') + cmd.append(self.dns6) + if self.enabled is not None: + cmd.append('autoconnect') + cmd.append(self.enabled) + # Can't use MTU with team + return cmd + + def create_connection_team_slave(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating team-slave interface + cmd.append('connection') + cmd.append('add') + cmd.append('type') + cmd.append(self.type) + cmd.append('con-name') + if self.cname is not None: + cmd.append(self.cname) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.cname is not None: + cmd.append(self.cname) + cmd.append('master') + if self.cname is not None: + cmd.append(self.master) + # if self.mtu is not None: + # cmd.append('802-3-ethernet.mtu') + # cmd.append(self.mtu) + return cmd + + def modify_connection_team_slave(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying team-slave interface + cmd.append('con') + cmd.append('mod') + cmd.append(self.cname) + cmd.append('connection.master') + cmd.append(self.master) + if self.mtu is not None: + cmd.append('802-3-ethernet.mtu') + cmd.append(self.mtu) + return cmd + + def create_connection_bond(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating bond interface + cmd.append('con') + cmd.append('add') + cmd.append('type') + cmd.append('bond') + cmd.append('con-name') + if self.cname is not None: + cmd.append(self.cname) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.cname is not None: + cmd.append(self.cname) + if self.ip4 is not None: + cmd.append('ip4') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('gw4') + cmd.append(self.gw4) + if self.ip6 is not None: + cmd.append('ip6') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('gw6') + cmd.append(self.gw6) + if self.enabled is not None: + cmd.append('autoconnect') + cmd.append(self.enabled) + if self.mode is not None: + cmd.append('mode') + cmd.append(self.mode) + if self.miimon is not None: + cmd.append('miimon') + cmd.append(self.miimon) + if self.downdelay is not None: + cmd.append('downdelay') + cmd.append(self.downdelay) + if self.downdelay is not None: + cmd.append('updelay') + cmd.append(self.updelay) + if self.downdelay is not None: + cmd.append('arp-interval') + cmd.append(self.arp_interval) + if self.downdelay is not None: + cmd.append('arp-ip-target') + cmd.append(self.arp_ip_target) + return cmd + + def modify_connection_bond(self): + 
cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying bond interface + cmd.append('con') + cmd.append('mod') + cmd.append(self.cname) + if self.ip4 is not None: + cmd.append('ipv4.address') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('ipv4.gateway') + cmd.append(self.gw4) + if self.dns4 is not None: + cmd.append('ipv4.dns') + cmd.append(self.dns4) + if self.ip6 is not None: + cmd.append('ipv6.address') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('ipv6.gateway') + cmd.append(self.gw4) + if self.dns6 is not None: + cmd.append('ipv6.dns') + cmd.append(self.dns6) + if self.enabled is not None: + cmd.append('autoconnect') + cmd.append(self.enabled) + return cmd + + def create_connection_bond_slave(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating bond-slave interface + cmd.append('connection') + cmd.append('add') + cmd.append('type') + cmd.append('bond-slave') + cmd.append('con-name') + if self.cname is not None: + cmd.append(self.cname) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.cname is not None: + cmd.append(self.cname) + cmd.append('master') + if self.cname is not None: + cmd.append(self.master) + return cmd + + def modify_connection_bond_slave(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying bond-slave interface + cmd.append('con') + cmd.append('mod') + cmd.append(self.cname) + cmd.append('connection.master') + cmd.append(self.master) + return cmd + + def create_connection_ethernet(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating ethernet interface + # To add an Ethernet connection with static IP configuration, issue a command as follows + # - nmcli: name=add cname=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present + # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1 + cmd.append('con') + cmd.append('add') + cmd.append('type') + cmd.append('ethernet') + cmd.append('con-name') + if self.cname is not None: + cmd.append(self.cname) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.cname is not None: + cmd.append(self.cname) + if self.ip4 is not None: + cmd.append('ip4') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('gw4') + cmd.append(self.gw4) + if self.ip6 is not None: + cmd.append('ip6') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('gw6') + cmd.append(self.gw6) + if self.enabled is not None: + cmd.append('autoconnect') + cmd.append(self.enabled) + return cmd + + def modify_connection_ethernet(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying ethernet interface + # To add an Ethernet connection with static IP configuration, issue a command as follows + # - nmcli: name=add cname=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present + # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1 + cmd.append('con') + cmd.append('mod') + cmd.append(self.cname) + if self.ip4 is not None: + cmd.append('ipv4.address') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('ipv4.gateway') + cmd.append(self.gw4) + if self.dns4 is not None: + cmd.append('ipv4.dns') + cmd.append(self.dns4) + if self.ip6 is not None: + 
cmd.append('ipv6.address') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('ipv6.gateway') + cmd.append(self.gw4) + if self.dns6 is not None: + cmd.append('ipv6.dns') + cmd.append(self.dns6) + if self.mtu is not None: + cmd.append('802-3-ethernet.mtu') + cmd.append(self.mtu) + if self.enabled is not None: + cmd.append('autoconnect') + cmd.append(self.enabled) + return cmd + + def create_connection_bridge(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating bridge interface + return cmd + + def modify_connection_bridge(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying bridge interface + return cmd + + def create_connection_vlan(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating ethernet interface + return cmd + + def modify_connection_vlan(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying ethernet interface + return cmd + + def create_connection(self): + cmd=[] + if self.type=='team': + # cmd=self.create_connection_team() + if (self.dns4 is not None) or (self.dns6 is not None): + cmd=self.create_connection_team() + self.execute_command(cmd) + cmd=self.modify_connection_team() + self.execute_command(cmd) + cmd=self.up_connection() + return self.execute_command(cmd) + elif (self.dns4 is None) or (self.dns6 is None): + cmd=self.create_connection_team() + return self.execute_command(cmd) + elif self.type=='team-slave': + if self.mtu is not None: + cmd=self.create_connection_team_slave() + self.execute_command(cmd) + cmd=self.modify_connection_team_slave() + self.execute_command(cmd) + # cmd=self.up_connection() + return self.execute_command(cmd) + else: + cmd=self.create_connection_team_slave() + return self.execute_command(cmd) + elif self.type=='bond': + if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): + cmd=self.create_connection_bond() + self.execute_command(cmd) + cmd=self.modify_connection_bond() + self.execute_command(cmd) + cmd=self.up_connection() + return self.execute_command(cmd) + else: + cmd=self.create_connection_bond() + return self.execute_command(cmd) + elif self.type=='bond-slave': + cmd=self.create_connection_bond_slave() + elif self.type=='ethernet': + if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): + cmd=self.create_connection_ethernet() + self.execute_command(cmd) + cmd=self.modify_connection_ethernet() + self.execute_command(cmd) + cmd=self.up_connection() + return self.execute_command(cmd) + else: + cmd=self.create_connection_ethernet() + return self.execute_command(cmd) + elif self.type=='bridge': + cmd=self.create_connection_bridge() + elif self.type=='vlan': + cmd=self.create_connection_vlan() + return self.execute_command(cmd) + + def remove_connection(self): + # self.down_connection() + cmd=[self.module.get_bin_path('nmcli', True)] + cmd.append('con') + cmd.append('del') + cmd.append(self.cname) + return self.execute_command(cmd) + + def modify_connection(self): + cmd=[] + if self.type=='team': + cmd=self.modify_connection_team() + elif self.type=='team-slave': + cmd=self.modify_connection_team_slave() + elif self.type=='bond': + cmd=self.modify_connection_bond() + elif self.type=='bond-slave': + cmd=self.modify_connection_bond_slave() + elif self.type=='ethernet': + cmd=self.modify_connection_ethernet() + elif self.type=='bridge': + cmd=self.modify_connection_bridge() + elif self.type=='vlan': + cmd=self.modify_connection_vlan() + return self.execute_command(cmd) + + +def 
main(): + # Parsing argument file + module=AnsibleModule( + argument_spec=dict( + enabled=dict(required=False, default=None, choices=['yes', 'no'], type='str'), + action=dict(required=False, default=None, choices=['add', 'mod', 'show', 'up', 'down', 'del'], type='str'), + state=dict(required=True, default=None, choices=['present', 'absent'], type='str'), + cname=dict(required=False, type='str'), + master=dict(required=False, default=None, type='str'), + autoconnect=dict(required=False, default=None, choices=['yes', 'no'], type='str'), + ifname=dict(required=False, default=None, type='str'), + type=dict(required=False, default=None, choices=['ethernet', 'team', 'team-slave', 'bond', 'bond-slave', 'bridge', 'vlan'], type='str'), + ip4=dict(required=False, default=None, type='str'), + gw4=dict(required=False, default=None, type='str'), + dns4=dict(required=False, default=None, type='str'), + ip6=dict(required=False, default=None, type='str'), + gw6=dict(required=False, default=None, type='str'), + dns6=dict(required=False, default=None, type='str'), + # Bond Specific vars + mode=dict(require=False, default="balance-rr", choices=["balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb"], type='str'), + miimon=dict(required=False, default=None, type='str'), + downdelay=dict(required=False, default=None, type='str'), + updelay=dict(required=False, default=None, type='str'), + arp_interval=dict(required=False, default=None, type='str'), + arp_ip_target=dict(required=False, default=None, type='str'), + # general usage + mtu=dict(required=False, default=None, type='str'), + mac=dict(required=False, default=None, type='str'), + # bridge specific vars + stp=dict(required=False, default='yes', choices=['yes', 'no'], type='str'), + priority=dict(required=False, default="128", type='str'), + slavepriority=dict(required=False, default="32", type='str'), + forwarddelay=dict(required=False, default="15", type='str'), + hellotime=dict(required=False, default="2", type='str'), + maxage=dict(required=False, default="20", type='str'), + ageingtime=dict(required=False, default="300", type='str'), + # vlan specific vars + vlanid=dict(required=False, default=None, type='str'), + vlandev=dict(required=False, default=None, type='str'), + flags=dict(required=False, default=None, type='str'), + ingress=dict(required=False, default=None, type='str'), + egress=dict(required=False, default=None, type='str'), + ), + supports_check_mode=True + ) + + nmcli=Nmcli(module) + + if nmcli.syslogging: + syslog.openlog('ansible-%s' % os.path.basename(__file__)) + syslog.syslog(syslog.LOG_NOTICE, 'Nmcli instantiated - platform %s' % nmcli.platform) + if nmcli.distribution: + syslog.syslog(syslog.LOG_NOTICE, 'Nuser instantiated - distribution %s' % nmcli.distribution) + + rc=None + out='' + err='' + result={} + result['cname']=nmcli.cname + result['state']=nmcli.state + + # check for issues + if nmcli.cname is None: + nmcli.module.fail_json(msg="You haven't specified a name for the connection") + # team-slave checks + if nmcli.type=='team-slave' and nmcli.master is None: + nmcli.module.fail_json(msg="You haven't specified a name for the master so we're not changing a thing") + if nmcli.type=='team-slave' and nmcli.ifname is None: + nmcli.module.fail_json(msg="You haven't specified a name for the connection") + + if nmcli.state=='absent': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err)=nmcli.down_connection() + (rc, out, 
err)=nmcli.remove_connection() + if rc!=0: + module.fail_json(name =('No Connection named %s exists' % nmcli.cname), msg=err, rc=rc) + + elif nmcli.state=='present': + if nmcli.connection_exists(): + # modify connection (note: this function is check mode aware) + # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.cname, nmcli.type)) + result['Exists']='Connections do exist so we are modifying them' + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err)=nmcli.modify_connection() + if not nmcli.connection_exists(): + result['Connection']=('Connection %s of Type %s is being added' % (nmcli.cname, nmcli.type)) + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err)=nmcli.create_connection() + if rc is not None and rc!=0: + module.fail_json(name=nmcli.cname, msg=err, rc=rc) + + if rc is None: + result['changed']=False + else: + result['changed']=True + if out: + result['stdout']=out + if err: + result['stderr']=err + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * + +main() \ No newline at end of file From 6c8d505532dd33d508f0250dcaadda80e29b2d51 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 12 May 2015 10:36:27 -0500 Subject: [PATCH 208/720] Add py27 compileall to test all modules, specifically for syntax errors --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index daf83b67931..cf64440ae3c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,3 +8,4 @@ addons: - python2.4 script: - python2.4 -m compileall -fq -x 'cloud/|monitoring/zabbix.*\.py|/layman.py|/maven_artifact.py|clustering/consul.*\.py' . + - python -m compileall -fq . From e5022ba87b6c45488b7d4e140df7f098495dba67 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 11 May 2015 13:25:50 -0700 Subject: [PATCH 209/720] Add python >= 2.6 to documented deps --- clustering/consul.py | 1 + clustering/consul_acl.py | 1 + clustering/consul_kv.py | 1 + clustering/consul_session.py | 1 + monitoring/zabbix_group.py | 4 +++- monitoring/zabbix_host.py | 3 ++- monitoring/zabbix_hostmacro.py | 3 ++- monitoring/zabbix_maintenance.py | 3 ++- monitoring/zabbix_screen.py | 3 ++- packaging/language/maven_artifact.py | 4 ++-- packaging/os/layman.py | 3 +++ 11 files changed, 20 insertions(+), 7 deletions(-) diff --git a/clustering/consul.py b/clustering/consul.py index 5db79e20c40..27761094698 100644 --- a/clustering/consul.py +++ b/clustering/consul.py @@ -38,6 +38,7 @@ description: changed occurred. An api method is planned to supply this metadata so at that stage change management will be added. requirements: + - "python >= 2.6" - python-consul - requests version_added: "1.9" diff --git a/clustering/consul_acl.py b/clustering/consul_acl.py index c481b780a64..57ff795931f 100644 --- a/clustering/consul_acl.py +++ b/clustering/consul_acl.py @@ -25,6 +25,7 @@ description: rules in a consul cluster via the agent. For more details on using and configuring ACLs, see https://www.consul.io/docs/internals/acl.html. requirements: + - "python >= 2.6" - python-consul - pyhcl - requests diff --git a/clustering/consul_kv.py b/clustering/consul_kv.py index e5a010a8c18..02d06868946 100644 --- a/clustering/consul_kv.py +++ b/clustering/consul_kv.py @@ -28,6 +28,7 @@ description: represents a prefix then Note that when a value is removed, the existing value if any is returned as part of the results. 
requirements: + - "python >= 2.6" - python-consul - requests version_added: "1.9" diff --git a/clustering/consul_session.py b/clustering/consul_session.py index 8e6516891d2..6f3a611a642 100644 --- a/clustering/consul_session.py +++ b/clustering/consul_session.py @@ -26,6 +26,7 @@ description: to implement distributed locks. In depth documentation for working with sessions can be found here http://www.consul.io/docs/internals/sessions.html requirements: + - "python >= 2.6" - python-consul - requests version_added: "1.9" diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py index 489a8617f54..d338267e370 100644 --- a/monitoring/zabbix_group.py +++ b/monitoring/zabbix_group.py @@ -26,7 +26,9 @@ short_description: Add or remove a host group to Zabbix. description: - This module uses the Zabbix API to add and remove host groups. version_added: '1.8' -requirements: [ 'zabbix-api' ] +requirements: + - "python >= 2.6" + - zabbix-api options: state: description: diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index c7b8e52b9e7..5798e663355 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -28,7 +28,8 @@ description: version_added: "2.0" author: Tony Minfei Ding, Harrison Gu requirements: - - zabbix-api python module + - "python >= 2.6" + - zabbix-api options: server_url: description: diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py index b41e114d760..7869d55c315 100644 --- a/monitoring/zabbix_hostmacro.py +++ b/monitoring/zabbix_hostmacro.py @@ -28,7 +28,8 @@ description: version_added: "2.0" author: Dean Hailin Song requirements: - - zabbix-api python module + - "python >= 2.6" + - zabbix-api options: server_url: description: diff --git a/monitoring/zabbix_maintenance.py b/monitoring/zabbix_maintenance.py index 559f9e0e55a..f59149fd07a 100644 --- a/monitoring/zabbix_maintenance.py +++ b/monitoring/zabbix_maintenance.py @@ -28,7 +28,8 @@ description: version_added: "1.8" author: Alexander Bulimov requirements: - - zabbix-api python module + - "python >= 2.6" + - zabbix-api options: state: description: diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py index ada2b1c6ab0..aa895af0bbd 100644 --- a/monitoring/zabbix_screen.py +++ b/monitoring/zabbix_screen.py @@ -29,7 +29,8 @@ description: version_added: "2.0" author: Tony Minfei Ding, Harrison Gu requirements: - - zabbix-api python module + - "python >= 2.6" + - zabbix-api options: server_url: description: diff --git a/packaging/language/maven_artifact.py b/packaging/language/maven_artifact.py index 2aeb158625b..f5d8f185a55 100644 --- a/packaging/language/maven_artifact.py +++ b/packaging/language/maven_artifact.py @@ -39,8 +39,8 @@ description: - available. author: Chris Schmidt requirements: - - python libxml - - python urllib2 + - "python >= 2.6" + - lxml options: group_id: description: The Maven groupId coordinate diff --git a/packaging/os/layman.py b/packaging/os/layman.py index 57c03528c9e..b4830f8ec2c 100644 --- a/packaging/os/layman.py +++ b/packaging/os/layman.py @@ -31,6 +31,9 @@ short_description: Manage Gentoo overlays description: - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. Please note that Layman must be installed on a managed node prior using this module. 
+requirements: + - "python >= 2.6" + - layman python module options: name: description: From 33b845a14f8b72ee5f84c9de13727922b7cd022c Mon Sep 17 00:00:00 2001 From: dhutty Date: Tue, 12 May 2015 12:43:33 -0400 Subject: [PATCH 210/720] Additional example, not using local_action --- notification/mail.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/notification/mail.py b/notification/mail.py index ae33c5ca4ca..6dc3da533e6 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -138,6 +138,13 @@ EXAMPLES = ''' attach="/etc/group /tmp/pavatar2.png" headers=Reply-To=john@example.com|X-Special="Something or other" charset=utf8 +# Sending an e-mail using the remote machine, not the Ansible controller node +- mail: + host='localhost' + port=25 + to="John Smith " + subject='Ansible-report' + body='System {{ ansible_hostname }} has been successfully provisioned.' ''' import os From 910761bd6da8391320d253121e70f78b5bd0db52 Mon Sep 17 00:00:00 2001 From: Nick Harring Date: Tue, 12 May 2015 14:27:22 -0700 Subject: [PATCH 211/720] Added version_added in Documentation output. Fixed shebang to be /usr/bin/python. --- monitoring/circonus_annotation.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/monitoring/circonus_annotation.py b/monitoring/circonus_annotation.py index 75ca6540cbe..9144df987f0 100644 --- a/monitoring/circonus_annotation.py +++ b/monitoring/circonus_annotation.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python # (c) 2014-2015, Epic Games, Inc. @@ -13,6 +13,7 @@ short_description: create an annotation in circonus description: - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided author: Nick Harring +version_added: 2.0 requirements: - urllib3 - requests From 46f6e5f6fcbc57f223653d290445d63461a922d6 Mon Sep 17 00:00:00 2001 From: Nick Harring Date: Tue, 12 May 2015 14:35:47 -0700 Subject: [PATCH 212/720] Fixed ansible module import which I changed chasing a better PEP-8 score. --- monitoring/circonus_annotation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/circonus_annotation.py b/monitoring/circonus_annotation.py index 9144df987f0..ebb5ecc175e 100644 --- a/monitoring/circonus_annotation.py +++ b/monitoring/circonus_annotation.py @@ -128,5 +128,5 @@ def main(): module.fail_json(msg='Request Failed', reason=e) module.exit_json(changed=True, annotation=resp.json()) -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import * main() From cf2b734ab3daa08a92b5db619b3f0f108524ce44 Mon Sep 17 00:00:00 2001 From: Nick Harring Date: Tue, 12 May 2015 14:42:21 -0700 Subject: [PATCH 213/720] Removed as keyword for 2.4 and earlier backwards compat. 
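
The "except ... as ..." spelling was only added in Python 2.6, so it fails to parse on the
Python 2.4 interpreter this repository still tests against. A minimal sketch of the comma
form kept here (the example values are illustrative only):

    try:
        int("not a number")
    except ValueError, err:  # "except ValueError as err:" is a SyntaxError before Python 2.6
        print "invalid literal: %s" % err
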
--- monitoring/circonus_annotation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/circonus_annotation.py b/monitoring/circonus_annotation.py index ebb5ecc175e..e2b6d8d79ed 100644 --- a/monitoring/circonus_annotation.py +++ b/monitoring/circonus_annotation.py @@ -124,8 +124,8 @@ def main(): annotation = create_annotation(module) try: resp = post_annotation(annotation, module.params['api_key']) - except requests.exceptions.RequestException as e: - module.fail_json(msg='Request Failed', reason=e) + except requests.exceptions.RequestException, err_str: + module.fail_json(msg='Request Failed', reason=err_str) module.exit_json(changed=True, annotation=resp.json()) from ansible.module_utils.basic import * From 95305f4ea9460d25612311c694ad48fe7b6998ea Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 10 May 2015 23:46:03 +0200 Subject: [PATCH 214/720] cloudstack: cs_sshkeypair: add account and domain support --- cloud/cloudstack/cs_sshkeypair.py | 50 +++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 16 deletions(-) diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 8dd02dcd1f1..1f1aa2c1fdd 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -23,6 +23,7 @@ DOCUMENTATION = ''' module: cs_sshkeypair short_description: Manages SSH keys on Apache CloudStack based clouds. description: + - Create, register and remove SSH keys. - If no key was found and no public key was provided and a new SSH private/public key pair will be created and the private key will be returned. version_added: '2.0' @@ -32,6 +33,16 @@ options: description: - Name of public key. required: true + domain: + description: + - Domain the public key is related to. + required: false + default: null + account: + description: + - Account the public key is related to. + required: false + default: null project: description: - Name of the project the public key to be registered in. 
@@ -111,10 +122,11 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): def register_ssh_key(self, public_key): ssh_key = self.get_ssh_key() - - args = {} - args['projectid'] = self.get_project_id() - args['name'] = self.module.params.get('name') + args = {} + args['domainid'] = self.get_domain('id') + args['account'] = self.get_account('name') + args['projectid'] = self.get_project('id') + args['name'] = self.module.params.get('name') res = None if not ssh_key: @@ -142,9 +154,11 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): ssh_key = self.get_ssh_key() if not ssh_key: self.result['changed'] = True - args = {} - args['projectid'] = self.get_project_id() - args['name'] = self.module.params.get('name') + args = {} + args['domainid'] = self.get_domain('id') + args['account'] = self.get_account('name') + args['projectid'] = self.get_project('id') + args['name'] = self.module.params.get('name') if not self.module.check_mode: res = self.cs.createSSHKeyPair(**args) ssh_key = res['keypair'] @@ -155,9 +169,11 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): ssh_key = self.get_ssh_key() if ssh_key: self.result['changed'] = True - args = {} - args['name'] = self.module.params.get('name') - args['projectid'] = self.get_project_id() + args = {} + args['domainid'] = self.get_domain('id') + args['account'] = self.get_account('name') + args['projectid'] = self.get_project('id') + args['name'] = self.module.params.get('name') if not self.module.check_mode: res = self.cs.deleteSSHKeyPair(**args) return ssh_key @@ -165,9 +181,11 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): def get_ssh_key(self): if not self.ssh_key: - args = {} - args['projectid'] = self.get_project_id() - args['name'] = self.module.params.get('name') + args = {} + args['domainid'] = self.get_domain('id') + args['account'] = self.get_account('name') + args['projectid'] = self.get_project('id') + args['name'] = self.module.params.get('name') ssh_keys = self.cs.listSSHKeyPairs(**args) if ssh_keys and 'sshkeypair' in ssh_keys: @@ -179,10 +197,8 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): if ssh_key: if 'fingerprint' in ssh_key: self.result['fingerprint'] = ssh_key['fingerprint'] - if 'name' in ssh_key: self.result['name'] = ssh_key['name'] - if 'privatekey' in ssh_key: self.result['private_key'] = ssh_key['privatekey'] return self.result @@ -196,8 +212,10 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): def main(): module = AnsibleModule( argument_spec = dict( - name = dict(required=True, default=None), + name = dict(required=True), public_key = dict(default=None), + domain = dict(default=None), + account = dict(default=None), project = dict(default=None), state = dict(choices=['present', 'absent'], default='present'), api_key = dict(default=None), From 185cb8757d8a87255f53cd96060bc14f18b847f5 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 27 Apr 2015 20:12:45 +0200 Subject: [PATCH 215/720] cloudstack: cs_securitygroup_rule: type -> sg_type --- cloud/cloudstack/cs_securitygroup_rule.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 1f2dac6f267..714efe956a3 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -306,15 +306,15 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): rule = None res = None - type = self.module.params.get('type') - if type == 'ingress': + sg_type = self.module.params.get('type') + if sg_type == 
'ingress': rule = self._get_rule(security_group['ingressrule']) if not rule: self.result['changed'] = True if not self.module.check_mode: res = self.cs.authorizeSecurityGroupIngress(**args) - elif type == 'egress': + elif sg_type == 'egress': rule = self._get_rule(security_group['egressrule']) if not rule: self.result['changed'] = True @@ -334,15 +334,15 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): security_group = self.get_security_group() rule = None res = None - type = self.module.params.get('type') - if type == 'ingress': + sg_type = self.module.params.get('type') + if sg_type == 'ingress': rule = self._get_rule(security_group['ingressrule']) if rule: self.result['changed'] = True if not self.module.check_mode: res = self.cs.revokeSecurityGroupIngress(id=rule['ruleid']) - elif type == 'egress': + elif sg_type == 'egress': rule = self._get_rule(security_group['egressrule']) if rule: self.result['changed'] = True From 5de249aaf96cea53140c9c9a4efd3e1109367a82 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 27 Apr 2015 20:19:37 +0200 Subject: [PATCH 216/720] cloudstack: cs_securitygroup_rule: fix result was not always what expected --- cloud/cloudstack/cs_securitygroup_rule.py | 47 +++++++++++------------ 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 714efe956a3..deeadd06c1a 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -327,7 +327,10 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): poll_async = self.module.params.get('poll_async') if res and poll_async: security_group = self._poll_job(res, 'securitygroup') - return security_group + key = sg_type + "rule" # ingressrule / egressrule + if key in security_group: + rule = security_group[key][0] + return rule def remove_rule(self): @@ -355,34 +358,30 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): poll_async = self.module.params.get('poll_async') if res and poll_async: res = self._poll_job(res, 'securitygroup') - return security_group + return rule def get_result(self, security_group_rule): - type = self.module.params.get('type') - - key = 'ingressrule' - if type == 'egress': - key = 'egressrule' - self.result['type'] = type + self.result['type'] = self.module.params.get('type') self.result['security_group'] = self.module.params.get('security_group') - - if key in security_group_rule and security_group_rule[key]: - if 'securitygroupname' in security_group_rule[key][0]: - self.result['user_security_group'] = security_group_rule[key][0]['securitygroupname'] - if 'cidr' in security_group_rule[key][0]: - self.result['cidr'] = security_group_rule[key][0]['cidr'] - if 'protocol' in security_group_rule[key][0]: - self.result['protocol'] = security_group_rule[key][0]['protocol'] - if 'startport' in security_group_rule[key][0]: - self.result['start_port'] = security_group_rule[key][0]['startport'] - if 'endport' in security_group_rule[key][0]: - self.result['end_port'] = security_group_rule[key][0]['endport'] - if 'icmpcode' in security_group_rule[key][0]: - self.result['icmp_code'] = security_group_rule[key][0]['icmpcode'] - if 'icmptype' in security_group_rule[key][0]: - self.result['icmp_type'] = security_group_rule[key][0]['icmptype'] + + if security_group_rule: + rule = security_group_rule + if 'securitygroupname' in rule: + self.result['user_security_group'] = rule['securitygroupname'] + if 'cidr' in rule: + self.result['cidr'] 
= rule['cidr'] + if 'protocol' in rule: + self.result['protocol'] = rule['protocol'] + if 'startport' in rule: + self.result['start_port'] = rule['startport'] + if 'endport' in rule: + self.result['end_port'] = rule['endport'] + if 'icmpcode' in rule: + self.result['icmp_code'] = rule['icmpcode'] + if 'icmptype' in rule: + self.result['icmp_type'] = rule['icmptype'] return self.result From 4c24c1cbfd9ee4189ccb3f3a8de897ca33adf486 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 27 Apr 2015 20:20:37 +0200 Subject: [PATCH 217/720] cloudstack: cs_securitygroup_rule: minor cleanup, DRY --- cloud/cloudstack/cs_securitygroup_rule.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index deeadd06c1a..1896e02a077 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -229,18 +229,21 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): and cidr == rule['cidr'] + def get_end_port(self): + if self.module.params.get('end_port'): + return self.module.params.get('end_port') + return self.module.params.get('start_port') + + def _get_rule(self, rules): user_security_group_name = self.module.params.get('user_security_group') cidr = self.module.params.get('cidr') protocol = self.module.params.get('protocol') start_port = self.module.params.get('start_port') - end_port = self.module.params.get('end_port') + end_port = self.get_end_port() icmp_code = self.module.params.get('icmp_code') icmp_type = self.module.params.get('icmp_type') - if not end_port: - end_port = start_port - if protocol in ['tcp', 'udp'] and not (start_port and end_port): self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol) @@ -295,15 +298,12 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): args['protocol'] = self.module.params.get('protocol') args['startport'] = self.module.params.get('start_port') - args['endport'] = self.module.params.get('end_port') + args['endport'] = self.get_end_port() args['icmptype'] = self.module.params.get('icmp_type') args['icmpcode'] = self.module.params.get('icmp_code') args['projectid'] = self.get_project_id() args['securitygroupid'] = security_group['id'] - if not args['endport']: - args['endport'] = args['startport'] - rule = None res = None sg_type = self.module.params.get('type') From 8476fe1d72a5e2bb660160ae2cb6c547d6162e0f Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 24 Apr 2015 20:18:19 +0200 Subject: [PATCH 218/720] cloudstack: add alias `port` in cs_firewall --- cloud/cloudstack/cs_firewall.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 13f114c1b35..3949d6a4dbd 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -52,9 +52,10 @@ options: - Start port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). required: false default: null + aliases: [ 'port' ] end_port: description: - - End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). + - End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). If not specified, equal C(start_port). 
required: false default: null icmp_type: @@ -81,8 +82,7 @@ EXAMPLES = ''' - local_action: module: cs_firewall ip_address: 4.3.2.1 - start_port: 80 - end_port: 80 + port: 80 cidr: 1.2.3.4/32 @@ -90,8 +90,7 @@ EXAMPLES = ''' - local_action: module: cs_firewall ip_address: 4.3.2.1 - start_port: 53 - end_port: 53 + port: 53 protocol: '{{ item }}' with_items: - tcp @@ -128,12 +127,18 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): self.firewall_rule = None + def get_end_port(self): + if self.module.params.get('end_port'): + return self.module.params.get('end_port') + return self.module.params.get('start_port') + + def get_firewall_rule(self): if not self.firewall_rule: cidr = self.module.params.get('cidr') protocol = self.module.params.get('protocol') start_port = self.module.params.get('start_port') - end_port = self.module.params.get('end_port') + end_port = self.get_end_port() icmp_code = self.module.params.get('icmp_code') icmp_type = self.module.params.get('icmp_type') @@ -187,7 +192,7 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): args['cidrlist'] = self.module.params.get('cidr') args['protocol'] = self.module.params.get('protocol') args['startport'] = self.module.params.get('start_port') - args['endport'] = self.module.params.get('end_port') + args['endport'] = self.get_end_port() args['icmptype'] = self.module.params.get('icmp_type') args['icmpcode'] = self.module.params.get('icmp_code') args['ipaddressid'] = self.get_ip_address_id() @@ -218,12 +223,12 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): def main(): module = AnsibleModule( argument_spec = dict( - ip_address = dict(required=True, default=None), + ip_address = dict(required=True), cidr = dict(default='0.0.0.0/0'), protocol = dict(choices=['tcp', 'udp', 'icmp'], default='tcp'), icmp_type = dict(type='int', default=None), icmp_code = dict(type='int', default=None), - start_port = dict(type='int', default=None), + start_port = dict(type='int', aliases=['port'], default=None), end_port = dict(type='int', default=None), state = dict(choices=['present', 'absent'], default='present'), project = dict(default=None), @@ -232,9 +237,6 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), - required_together = ( - ['start_port', 'end_port'], - ), mutually_exclusive = ( ['icmp_type', 'start_port'], ['icmp_type', 'end_port'], From 5bc3ae040dd8ce08b818db0958cb840197cdd633 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 8 May 2015 17:29:38 +0200 Subject: [PATCH 219/720] cloudstack: cs_firewall: add account and domain --- cloud/cloudstack/cs_firewall.py | 39 +++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 3949d6a4dbd..3f1320fa183 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -68,9 +68,19 @@ options: - Error code for this icmp message. Considered if C(protocol=icmp). required: false default: null + domain: + description: + - Domain the firewall rule is related to. + required: false + default: null + account: + description: + - Account the firewall rule is related to. + required: false + default: null project: description: - - Name of the project. + - Name of the project the firewall rule is related to. 
required: false default: null extends_documentation_fragment: cloudstack @@ -148,9 +158,11 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): if protocol == 'icmp' and not icmp_type: self.module.fail_json(msg="no icmp_type set") - args = {} - args['ipaddressid'] = self.get_ip_address_id() - args['projectid'] = self.get_project_id() + args = {} + args['ipaddressid'] = self.get_ip_address('id') + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') firewall_rules = self.cs.listFirewallRules(**args) if firewall_rules and 'firewallrule' in firewall_rules: @@ -188,14 +200,15 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): firewall_rule = self.get_firewall_rule() if not firewall_rule: self.result['changed'] = True - args = {} - args['cidrlist'] = self.module.params.get('cidr') - args['protocol'] = self.module.params.get('protocol') - args['startport'] = self.module.params.get('start_port') - args['endport'] = self.get_end_port() - args['icmptype'] = self.module.params.get('icmp_type') - args['icmpcode'] = self.module.params.get('icmp_code') - args['ipaddressid'] = self.get_ip_address_id() + args = {} + args['cidrlist'] = self.module.params.get('cidr') + args['protocol'] = self.module.params.get('protocol') + args['startport'] = self.module.params.get('start_port') + args['endport'] = self.get_end_port() + args['icmptype'] = self.module.params.get('icmp_type') + args['icmpcode'] = self.module.params.get('icmp_code') + args['ipaddressid'] = self.get_ip_address('id') + if not self.module.check_mode: firewall_rule = self.cs.createFirewallRule(**args) @@ -231,6 +244,8 @@ def main(): start_port = dict(type='int', aliases=['port'], default=None), end_port = dict(type='int', default=None), state = dict(choices=['present', 'absent'], default='present'), + domain = dict(default=None), + account = dict(default=None), project = dict(default=None), api_key = dict(default=None), api_secret = dict(default=None), From c5514e0618206d8fff0a0e4555f57c0bb57040d5 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Thu, 7 May 2015 19:02:38 +0200 Subject: [PATCH 220/720] cloudstack: cs_instance: add domain and account --- cloud/cloudstack/cs_instance.py | 69 +++++++++++++++++++++++++++------ 1 file changed, 57 insertions(+), 12 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 8680f20ada5..976248824ca 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -106,6 +106,16 @@ options: required: false default: [] aliases: [ 'security_group' ] + domain: + description: + - Domain the instance is related to. + required: false + default: null + account: + description: + - Account the instance is related to. + required: false + default: null project: description: - Name of the project the instance to be deployed in. @@ -252,6 +262,16 @@ ssh_key: returned: success type: string sample: key@work +domain: + description: Domain the instance is related to. + returned: success + type: string + sample: example domain +account: + description: Account the instance is related to. + returned: success + type: string + sample: example account project: description: Name of project the instance is related to. 
returned: success @@ -352,8 +372,15 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): if template and iso: self.module.fail_json(msg="Template are ISO are mutually exclusive.") + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') + if template: - templates = self.cs.listTemplates(templatefilter='executable') + args['templatefilter'] = 'executable' + templates = self.cs.listTemplates(**args) if templates: for t in templates['template']: if template in [ t['displaytext'], t['name'], t['id'] ]: @@ -361,7 +388,8 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): self.module.fail_json(msg="Template '%s' not found" % template) elif iso: - isos = self.cs.listIsos() + args['isofilter'] = 'executable' + isos = self.cs.listIsos(**args) if isos: for i in isos['iso']: if iso in [ i['displaytext'], i['name'], i['id'] ]: @@ -375,7 +403,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): if not disk_offering: return None - disk_offerings = self.cs.listDiskOfferings() + args = {} + args['domainid'] = self.get_domain('id') + + disk_offerings = self.cs.listDiskOfferings(**args) if disk_offerings: for d in disk_offerings['diskoffering']: if disk_offering in [ d['displaytext'], d['name'], d['id'] ]: @@ -388,9 +419,12 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): if not instance: instance_name = self.module.params.get('name') - args = {} - args['projectid'] = self.get_project_id() - args['zoneid'] = self.get_zone_id() + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') + instances = self.cs.listVirtualMachines(**args) if instances: for v in instances['virtualmachine']: @@ -405,9 +439,12 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): if not network_names: return None - args = {} - args['zoneid'] = self.get_zone_id() - args['projectid'] = self.get_project_id() + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') + networks = self.cs.listNetworks(**args) if not networks: self.module.fail_json(msg="No networks available") @@ -458,9 +495,11 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args = {} args['templateid'] = self.get_template_or_iso_id() - args['zoneid'] = self.get_zone_id() + args['zoneid'] = self.get_zone('id') args['serviceofferingid'] = self.get_service_offering_id() - args['projectid'] = self.get_project_id() + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') args['diskofferingid'] = self.get_disk_offering_id() args['networkids'] = self.get_network_ids() args['hypervisor'] = self.get_hypervisor() @@ -503,7 +542,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args_ssh_key = {} args_ssh_key['id'] = instance['id'] args_ssh_key['keypair'] = self.module.params.get('ssh_key') - args_ssh_key['projectid'] = self.get_project_id() + args_ssh_key['projectid'] = self.get_project('id') if self._has_changed(args_service_offering, instance) or \ self._has_changed(args_instance_update, instance) or \ @@ -668,6 +707,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): self.result['display_name'] = instance['displayname'] if 'group' in instance: self.result['group'] = instance['group'] + if 'domain' 
in instance: + self.result['domain'] = instance['domain'] + if 'account' in instance: + self.result['account'] = instance['account'] if 'project' in instance: self.result['project'] = instance['project'] if 'publicip' in instance: @@ -732,6 +775,8 @@ def main(): hypervisor = dict(default=None), security_groups = dict(type='list', aliases=[ 'security_group' ], default=[]), affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]), + domain = dict(default=None), + account = dict(default=None), project = dict(default=None), user_data = dict(default=None), zone = dict(default=None), From cc02c3227a4a2065f7b5881b3daaad35b6b8c51e Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 5 May 2015 22:12:39 +0200 Subject: [PATCH 221/720] cloudstack: new cs_portforward module --- cloud/cloudstack/cs_portforward.py | 432 +++++++++++++++++++++++++++++ 1 file changed, 432 insertions(+) create mode 100644 cloud/cloudstack/cs_portforward.py diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py new file mode 100644 index 00000000000..6c9cbd7e76a --- /dev/null +++ b/cloud/cloudstack/cs_portforward.py @@ -0,0 +1,432 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_portforward +short_description: Manages port forwarding rules on Apache CloudStack based clouds. +description: + - Create, update and remove port forwarding rules. +version_added: '2.0' +author: René Moser +options: + ip_address: + description: + - Public IP address the rule is assigned to. + required: true + vm: + description: + - Name of virtual machine which we make the port forwarding rule for. Required if C(state=present). + required: false + default: null + state: + description: + - State of the port forwarding rule. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + protocol: + description: + - Protocol of the port forwarding rule. + required: false + default: 'tcp' + choices: [ 'tcp', 'udp' ] + public_port + description: + - Start public port for this rule. + required: true + public_end_port + description: + - End public port for this rule. If not specific, equal C(public_port). + required: false + default: null + private_port + description: + - Start private port for this rule. + required: true + private_end_port + description: + - End private port for this rule. If not specific, equal C(private_port) + required: false + default: null + open_firewall: + description: + - Whether the firewall rule for public port should be created, while creating the new rule. + - Use M(cs_firewall) for managing firewall rules. + required: false + default: false + vm_guest_ip: + description: + - VM guest NIC secondary IP address for the port forwarding rule. + required: false + default: false + domain: + description: + - Domain the C(vm) is related to. 
+ required: false + default: null + account: + description: + - Account the C(vm) is related to. + required: false + default: null + project: + description: + - Name of the project the c(vm) is located in. + required: false + default: null + zone: + description: + - Name of the zone in which the virtual machine is in. If not set, default zone is used. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +--- +# 1.2.3.4:80 -> web01:8080 +- local_action: + module: cs_portforward + ip_address: 1.2.3.4 + vm: web01 + public_port: 80 + private_port: 8080 + + +# forward SSH and open firewall +- local_action: + module: cs_portforward + ip_address: '{{ public_ip }}' + vm: '{{ inventory_hostname }}' + public_port: '{{ ansible_ssh_port }}' + private_port: 22 + open_firewall: true + + +# forward DNS traffic, but do not open firewall +- local_action: + module: cs_portforward + ip_address: 1.2.3.4 + vm: '{{ inventory_hostname }}' + public_port: 53 + private_port: 53 + protocol: udp + open_firewall: true + + +# remove ssh port forwarding +- local_action: + module: cs_portforward + ip_address: 1.2.3.4 + public_port: 22 + private_port: 22 + state: absent + +''' + +RETURN = ''' +--- +ip_address: + description: Public IP address. + returned: success + type: string + sample: 1.2.3.4 +protocol: + description: Protocol. + returned: success + type: string + sample: tcp +private_port: + description: Private start port. + returned: success + type: int + sample: 80 +private_end_port: + description: Private end port. + returned: success + type: int +public_port: + description: Public start port. + returned: success + type: int + sample: 80 +public_end_port: + description: Public end port. + returned: success + type: int + sample: 80 +tags: + description: Tag srelated to the port forwarding. + returned: success + type: list + sample: [] +vm_name: + description: Name of the virtual machine. + returned: success + type: string + sample: web-01 +vm_display_name: + description: Display name of the virtual machine. + returned: success + type: string + sample: web-01 +vm_guest_ip: + description: IP of the virtual machine. 
+ returned: success + type: string + sample: 10.101.65.152 +''' + + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackPortforwarding(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.portforwarding_rule = None + self.vm_default_nic = None + + + def get_public_end_port(self): + if not self.module.params.get('public_end_port'): + return self.module.params.get('public_port') + return self.module.params.get('public_end_port') + + + def get_private_end_port(self): + if not self.module.params.get('private_end_port'): + return self.module.params.get('private_port') + return self.module.params.get('private_end_port') + + + def get_vm_guest_ip(self): + vm_guest_ip = self.module.params.get('vm_guest_ip') + default_nic = self.get_vm_default_nic() + + if not vm_guest_ip: + return default_nic['ipaddress'] + + for secondary_ip in default_nic['secondaryip']: + if vm_guest_ip == secondary_ip['ipaddress']: + return vm_guest_ip + self.module.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip) + + + def get_vm_default_nic(self): + if self.vm_default_nic: + return self.vm_default_nic + + nics = self.cs.listNics(virtualmachineid=self.get_vm(key='id')) + if nics: + for n in nics['nic']: + if n['isdefault']: + self.vm_default_nic = n + return self.vm_default_nic + self.module.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm')) + + + def get_portforwarding_rule(self): + if not self.portforwarding_rule: + protocol = self.module.params.get('protocol') + public_port = self.module.params.get('public_port') + public_end_port = self.get_public_end_port() + private_port = self.module.params.get('private_port') + private_end_port = self.get_public_end_port() + + args = {} + args['ipaddressid'] = self.get_ip_address(key='id') + args['projectid'] = self.get_project(key='id') + portforwarding_rules = self.cs.listPortForwardingRules(**args) + + if portforwarding_rules and 'portforwardingrule' in portforwarding_rules: + for rule in portforwarding_rules['portforwardingrule']: + if protocol == rule['protocol'] \ + and public_port == int(rule['publicport']): + self.portforwarding_rule = rule + break + return self.portforwarding_rule + + + def present_portforwarding_rule(self): + portforwarding_rule = self.get_portforwarding_rule() + if portforwarding_rule: + portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule) + else: + portforwarding_rule = self.create_portforwarding_rule() + return portforwarding_rule + + + def create_portforwarding_rule(self): + args = {} + args['protocol'] = self.module.params.get('protocol') + args['publicport'] = self.module.params.get('public_port') + args['publicendport'] = self.get_public_end_port() + args['privateport'] = self.module.params.get('private_port') + args['privateendport'] = self.get_private_end_port() + args['openfirewall'] = self.module.params.get('open_firewall') + args['vmguestip'] = self.get_vm_guest_ip() + args['ipaddressid'] = self.get_ip_address(key='id') + args['virtualmachineid'] = self.get_vm(key='id') + + portforwarding_rule = None + self.result['changed'] = True + if not self.module.check_mode: + portforwarding_rule = self.cs.createPortForwardingRule(**args) + poll_async = self.module.params.get('poll_async') + if poll_async: + portforwarding_rule = 
self._poll_job(portforwarding_rule, 'portforwardingrule') + return portforwarding_rule + + + def update_portforwarding_rule(self, portforwarding_rule): + args = {} + args['protocol'] = self.module.params.get('protocol') + args['publicport'] = self.module.params.get('public_port') + args['publicendport'] = self.get_public_end_port() + args['privateport'] = self.module.params.get('private_port') + args['privateendport'] = self.get_private_end_port() + args['openfirewall'] = self.module.params.get('open_firewall') + args['vmguestip'] = self.get_vm_guest_ip() + args['ipaddressid'] = self.get_ip_address(key='id') + args['virtualmachineid'] = self.get_vm(key='id') + + if self._has_changed(args, portforwarding_rule): + self.result['changed'] = True + if not self.module.check_mode: + # API broken in 4.2.1?, workaround using remove/create instead of update + # portforwarding_rule = self.cs.updatePortForwardingRule(**args) + self.absent_portforwarding_rule() + portforwarding_rule = self.cs.createPortForwardingRule(**args) + poll_async = self.module.params.get('poll_async') + if poll_async: + portforwarding_rule = self._poll_job(portforwarding_rule, 'portforwardingrule') + return portforwarding_rule + + + def absent_portforwarding_rule(self): + portforwarding_rule = self.get_portforwarding_rule() + + if portforwarding_rule: + self.result['changed'] = True + args = {} + args['id'] = portforwarding_rule['id'] + + if not self.module.check_mode: + res = self.cs.deletePortForwardingRule(**args) + poll_async = self.module.params.get('poll_async') + if poll_async: + self._poll_job(res, 'portforwardingrule') + return portforwarding_rule + + + def get_result(self, portforwarding_rule): + if portforwarding_rule: + if 'id' in portforwarding_rule: + self.result['id'] = portforwarding_rule['id'] + if 'virtualmachinedisplayname' in portforwarding_rule: + self.result['vm_display_name'] = portforwarding_rule['virtualmachinedisplayname'] + if 'virtualmachinename' in portforwarding_rule: + self.result['vm_name'] = portforwarding_rule['virtualmachinename'] + if 'ipaddress' in portforwarding_rule: + self.result['ip_address'] = portforwarding_rule['ipaddress'] + if 'vmguestip' in portforwarding_rule: + self.result['vm_guest_ip'] = portforwarding_rule['vmguestip'] + if 'publicport' in portforwarding_rule: + self.result['public_port'] = portforwarding_rule['publicport'] + if 'publicendport' in portforwarding_rule: + self.result['public_end_port'] = portforwarding_rule['publicendport'] + if 'privateport' in portforwarding_rule: + self.result['private_port'] = portforwarding_rule['privateport'] + if 'privateendport' in portforwarding_rule: + self.result['private_end_port'] = portforwarding_rule['privateendport'] + if 'protocol' in portforwarding_rule: + self.result['protocol'] = portforwarding_rule['protocol'] + if 'tags' in portforwarding_rule: + self.result['tags'] = [] + for tag in portforwarding_rule['tags']: + result_tag = {} + result_tag['key'] = tag['key'] + result_tag['value'] = tag['value'] + self.result['tags'].append(result_tag) + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + ip_address = dict(required=True), + protocol= dict(choices=['tcp', 'udp'], default='tcp'), + public_port = dict(type='int', required=True), + public_end_port = dict(type='int', default=None), + private_port = dict(type='int', required=True), + private_end_port = dict(type='int', default=None), + state = dict(choices=['present', 'absent'], default='present'), + open_firewall = dict(choices=BOOLEANS, 
default=False), + vm_guest_ip = dict(default=None), + vm = dict(default=None), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_pf = AnsibleCloudStackPortforwarding(module) + state = module.params.get('state') + if state in ['absent']: + pf_rule = acs_pf.absent_portforwarding_rule() + else: + pf_rule = acs_pf.present_portforwarding_rule() + + result = acs_pf.get_result(pf_rule) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 0edf04dca2df6d4b2940330ea340143b7834f970 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 9 May 2015 01:15:24 +0200 Subject: [PATCH 222/720] cloudstack: cs_vmshapshot: add account and domain --- cloud/cloudstack/cs_vmsnapshot.py | 55 +++++++++++++++++++++++++------ 1 file changed, 45 insertions(+), 10 deletions(-) diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index dad660cd77c..5a476e30795 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -62,6 +62,16 @@ options: required: false default: 'present' choices: [ 'present', 'absent', 'revert' ] + domain: + description: + - Domain the VM snapshot is related to. + required: false + default: null + account: + description: + - Account the VM snapshot is related to. + required: false + default: null poll_async: description: - Poll async jobs until job has finished. @@ -134,6 +144,21 @@ description: returned: success type: string sample: snapshot brought to you by Ansible +domain: + description: Domain the the vm snapshot is related to. + returned: success + type: string + sample: example domain +account: + description: Account the vm snapshot is related to. + returned: success + type: string + sample: example account +project: + description: Name of project the vm snapshot is related to. 
+ returned: success + type: string + sample: Production ''' try: @@ -156,10 +181,12 @@ class AnsibleCloudStackVmSnapshot(AnsibleCloudStack): def get_snapshot(self): - args = {} - args['virtualmachineid'] = self.get_vm_id() - args['projectid'] = self.get_project_id() - args['name'] = self.module.params.get('name') + args = {} + args['virtualmachineid'] = self.get_vm('id') + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + args['name'] = self.module.params.get('name') snapshots = self.cs.listVMSnapshot(**args) if snapshots: @@ -172,11 +199,11 @@ class AnsibleCloudStackVmSnapshot(AnsibleCloudStack): if not snapshot: self.result['changed'] = True - args = {} - args['virtualmachineid'] = self.get_vm_id() - args['name'] = self.module.params.get('name') - args['description'] = self.module.params.get('description') - args['snapshotmemory'] = self.module.params.get('snapshot_memory') + args = {} + args['virtualmachineid'] = self.get_vm('id') + args['name'] = self.module.params.get('name') + args['description'] = self.module.params.get('description') + args['snapshotmemory'] = self.module.params.get('snapshot_memory') if not self.module.check_mode: res = self.cs.createVMSnapshot(**args) @@ -242,6 +269,12 @@ class AnsibleCloudStackVmSnapshot(AnsibleCloudStack): self.result['name'] = snapshot['name'] if 'description' in snapshot: self.result['description'] = snapshot['description'] + if 'domain' in snapshot: + self.result['domain'] = snapshot['domain'] + if 'account' in snapshot: + self.result['account'] = snapshot['account'] + if 'project' in snapshot: + self.result['project'] = snapshot['project'] return self.result @@ -251,10 +284,12 @@ def main(): name = dict(required=True, aliases=['displayname']), vm = dict(required=True), description = dict(default=None), - project = dict(default=None), zone = dict(default=None), snapshot_memory = dict(choices=BOOLEANS, default=False), state = dict(choices=['present', 'absent', 'revert'], default='present'), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), poll_async = dict(choices=BOOLEANS, default=True), api_key = dict(default=None), api_secret = dict(default=None), From 24b2b29abaa3669048092b00c9905d4e746fb018 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 8 May 2015 17:53:57 +0200 Subject: [PATCH 223/720] cloudstack: cs_firewall: add results --- cloud/cloudstack/cs_firewall.py | 57 ++++++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 3f1320fa183..54a9e137756 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -19,6 +19,7 @@ # along with Ansible. If not, see . DOCUMENTATION = ''' +--- module: cs_firewall short_description: Manages firewall rules on Apache CloudStack based clouds. description: @@ -117,6 +118,45 @@ EXAMPLES = ''' state: absent ''' +RETURN = ''' +--- +ip_address: + description: IP address of the rule. + returned: success + type: string + sample: 10.100.212.10 +cidr: + description: CIDR of the rule. + returned: success + type: string + sample: 0.0.0.0/0 +protocol: + description: Protocol of the rule. + returned: success + type: string + sample: tcp +start_port: + description: Start port of the rule. + returned: success + type: int + sample: 80 +end_port: + description: End port of the rule. 
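The RETURN entries above are populated by the get_result() changes in these patches: selected keys of the CloudStack API response are copied into the module result, renamed to the documented snake_case names and cast where needed (ports and ICMP fields to int). A standalone sketch of that copy-rename-cast step; the field map and sample payload are made up to mirror the pattern, while the module itself uses explicit if-checks per key:

FIELD_MAP = {                 # API key -> (result key, cast)
    "ipaddress": ("ip_address", str),
    "cidrlist":  ("cidr",       str),
    "protocol":  ("protocol",   str),
    "startport": ("start_port", int),
    "endport":   ("end_port",   int),
    "icmpcode":  ("icmp_code",  int),
    "icmptype":  ("icmp_type",  int),
}

def build_result(api_rule):
    result = {"changed": False}
    for api_key, (result_key, cast) in FIELD_MAP.items():
        if api_key in api_rule:               # only copy what the API actually returned
            result[result_key] = cast(api_rule[api_key])
    return result

sample = {"ipaddress": "10.100.212.10", "cidrlist": "0.0.0.0/0",
          "protocol": "tcp", "startport": "80", "endport": "80"}
print(build_result(sample))
# -> changed plus ip_address, cidr, protocol, start_port=80, end_port=80

The same pattern is what the cs_vmsnapshot change above adds for the new domain, account and project return values.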
+ returned: success + type: int + sample: 80 +icmp_code: + description: ICMP code of the rule. + returned: success + type: int + sample: 1 +icmp_type: + description: ICMP type of the rule. + returned: success + type: int + sample: 1 +''' + try: from cs import CloudStack, CloudStackException, read_config has_lib_cs = True @@ -200,6 +240,7 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): firewall_rule = self.get_firewall_rule() if not firewall_rule: self.result['changed'] = True + args = {} args['cidrlist'] = self.module.params.get('cidr') args['protocol'] = self.module.params.get('protocol') @@ -209,7 +250,6 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): args['icmpcode'] = self.module.params.get('icmp_code') args['ipaddressid'] = self.get_ip_address('id') - if not self.module.check_mode: firewall_rule = self.cs.createFirewallRule(**args) @@ -230,6 +270,21 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): def get_result(self, firewall_rule): + if firewall_rule: + if 'cidrlist' in firewall_rule: + self.result['cidr'] = firewall_rule['cidrlist'] + if 'startport' in firewall_rule: + self.result['start_port'] = int(firewall_rule['startport']) + if 'endport' in firewall_rule: + self.result['end_port'] = int(firewall_rule['endport']) + if 'protocol' in firewall_rule: + self.result['protocol'] = firewall_rule['protocol'] + if 'ipaddress' in firewall_rule: + self.result['ip_address'] = firewall_rule['ipaddress'] + if 'icmpcode' in firewall_rule: + self.result['icmp_code'] = int(firewall_rule['icmpcode']) + if 'icmptype' in firewall_rule: + self.result['icmp_type'] = int(firewall_rule['icmptype']) return self.result From e3373ffc46d5b318222a6dd71d6790bcdecb43be Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 13 May 2015 08:14:44 -0700 Subject: [PATCH 224/720] Fix documentation --- cloud/cloudstack/cs_portforward.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 6c9cbd7e76a..d5ce0ac4825 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -48,20 +48,20 @@ options: required: false default: 'tcp' choices: [ 'tcp', 'udp' ] - public_port + public_port: description: - Start public port for this rule. required: true - public_end_port + public_end_port: description: - End public port for this rule. If not specific, equal C(public_port). required: false default: null - private_port + private_port: description: - Start private port for this rule. required: true - private_end_port + private_end_port: description: - End private port for this rule. 
If not specific, equal C(private_port) required: false From fbc0bcbd9c1190d19c4468b6cf860a3ba06625f2 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 13 May 2015 13:59:24 -0400 Subject: [PATCH 225/720] minor doc fixes --- database/mysql/mysql_replication.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index 92d98fe3b2b..96d5be0ce39 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -93,7 +93,7 @@ options: master_ssl: description: - same as mysql variable - possible values: 0,1 + choices: [ 0, 1 ] master_ssl_ca: description: - same as mysql variable @@ -110,8 +110,11 @@ options: description: - same as mysql variable master_auto_position: - descrtiption: + description: - does the host uses GTID based replication or not + required: false + default: null + version_added: "2.0" ''' EXAMPLES = ''' From 916cc17acfc6bf9e98883263bcac657079293b61 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 13 May 2015 14:02:57 -0400 Subject: [PATCH 226/720] minor doc fixes --- network/f5/bigip_pool_member.py | 34 ++------------------------------- 1 file changed, 2 insertions(+), 32 deletions(-) diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index 8f84d4669ed..11ae9f35646 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -39,23 +39,14 @@ options: description: - BIG-IP host required: true - default: null - choices: [] - aliases: [] user: description: - BIG-IP username required: true - default: null - choices: [] - aliases: [] password: description: - BIG-IP password required: true - default: null - choices: [] - aliases: [] validate_certs: description: - If C(no), SSL certificates will not be validated. This should only be used @@ -70,79 +61,58 @@ options: required: true default: present choices: ['present', 'absent'] - aliases: [] session_state: description: - Set new session availability status for pool member - version_added: "1.9" + version_added: "2.0" required: false default: null choices: ['enabled', 'disabled'] - aliases: [] monitor_state: description: - Set monitor availability status for pool member - version_added: "1.9" + version_added: "2.0" required: false default: null choices: ['enabled', 'disabled'] - aliases: [] pool: description: - Pool name. This pool must exist. required: true - default: null - choices: [] - aliases: [] partition: description: - Partition required: false default: 'Common' - choices: [] - aliases: [] host: description: - Pool member IP required: true - default: null - choices: [] aliases: ['address', 'name'] port: description: - Pool member port required: true - default: null - choices: [] - aliases: [] connection_limit: description: - Pool member connection limit. Setting this to 0 disables the limit. required: false default: null - choices: [] - aliases: [] description: description: - Pool member description required: false default: null - choices: [] - aliases: [] rate_limit: description: - Pool member rate limit (connections-per-second). Setting this to 0 disables the limit. required: false default: null - choices: [] - aliases: [] ratio: description: - Pool member ratio weight. Valid values range from 1 through 100. New pool members -- unless overriden with this value -- default to 1. 
required: false default: null - choices: [] - aliases: [] ''' EXAMPLES = ''' From 997a0a11d0844647daff85f8fbe93bd4029b9393 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:07:05 -0400 Subject: [PATCH 227/720] Adding author's github id --- cloud/amazon/cloudtrail.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index 312b9b08c9c..6a1885d6ee7 100755 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -21,7 +21,7 @@ short_description: manage CloudTrail creation and deletion description: - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. version_added: "2.0" -author: Ted Timmons +author: "Ted Timmons (@tedder)" requirements: - "boto >= 2.21" options: From 17e755786e81767100693bdb2673d0000bb47230 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:09:05 -0400 Subject: [PATCH 228/720] Adding author's github id --- cloud/cloudstack/cs_account.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index cd6a1228807..014859cc12c 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -25,7 +25,7 @@ short_description: Manages account on Apache CloudStack based clouds. description: - Create, disable, lock, enable and remove accounts. version_added: '2.0' -author: René Moser +author: "René Moser (@resmo)" options: name: description: From b82c1454f8b3c850c1acccb93595a9b6859bc48f Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:09:51 -0400 Subject: [PATCH 229/720] Adding author's github id --- cloud/cloudstack/cs_affinitygroup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 07b9cf42d6a..c083cd9dc21 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -25,7 +25,7 @@ short_description: Manages affinity groups on Apache CloudStack based clouds. description: - Create and remove affinity groups. version_added: '2.0' -author: René Moser +author: "René Moser (@resmo)" options: name: description: From 5a064c5b9a29c2d981d1964ccda3631125cad8cd Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:10:14 -0400 Subject: [PATCH 230/720] Adding author's github id --- cloud/cloudstack/cs_firewall.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 54a9e137756..d69ba12a3de 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -25,7 +25,7 @@ short_description: Manages firewall rules on Apache CloudStack based clouds. description: - Creates and removes firewall rules. 
version_added: '2.0' -author: René Moser +author: "René Moser (@resmo)" options: ip_address: description: From 219d2616146cdb366ae1f23a41abe9a1b7abdacc Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:10:36 -0400 Subject: [PATCH 231/720] Update cs_instance.py --- cloud/cloudstack/cs_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 976248824ca..864f7a14ed6 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -25,7 +25,7 @@ short_description: Manages instances and virtual machines on Apache CloudStack b description: - Deploy, start, restart, stop and destroy instances on Apache CloudStack, Citrix CloudPlatform and Exoscale. version_added: '2.0' -author: René Moser +author: "René Moser (@resmo)" options: name: description: From 241b6eb54143baf8f78ad9243024bee54934aa39 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:10:55 -0400 Subject: [PATCH 232/720] Adding author's github id --- cloud/cloudstack/cs_instancegroup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index 2c47a9f6f25..a9b35147691 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -25,7 +25,7 @@ short_description: Manages instance groups on Apache CloudStack based clouds. description: - Create and remove instance groups. version_added: '2.0' -author: René Moser +author: "René Moser (@resmo)" options: name: description: From 06508a745a6c55277f2978112fcd88010589cba1 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:11:14 -0400 Subject: [PATCH 233/720] Adding author's github id --- cloud/cloudstack/cs_iso.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 83af1e1783e..0afdeebc8dc 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -25,7 +25,7 @@ short_description: Manages ISOs images on Apache CloudStack based clouds. description: - Register and remove ISO images. version_added: '2.0' -author: René Moser +author: "René Moser (@resmo)" options: name: description: From 5d0887bd944fd50cde3bc554a8093891063b8f08 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:11:34 -0400 Subject: [PATCH 234/720] Adding author's github id --- cloud/cloudstack/cs_portforward.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index d5ce0ac4825..dc714dfb65c 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -25,7 +25,7 @@ short_description: Manages port forwarding rules on Apache CloudStack based clou description: - Create, update and remove port forwarding rules. 
version_added: '2.0' -author: René Moser +author: "René Moser (@resmo)" options: ip_address: description: From 9a039fa462bd00b4241608a3f22fb54784cdc350 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:12:04 -0400 Subject: [PATCH 235/720] Update cs_securitygroup.py --- cloud/cloudstack/cs_securitygroup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 50556da5bb3..2ce56ff8a56 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -25,7 +25,7 @@ short_description: Manages security groups on Apache CloudStack based clouds. description: - Create and remove security groups. version_added: '2.0' -author: René Moser +author: "René Moser (@resmo)" options: name: description: From 93f613d0eb454b532324d1fa071b84d8228c8ec4 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:12:29 -0400 Subject: [PATCH 236/720] Adding author's github id --- cloud/cloudstack/cs_securitygroup_rule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 1896e02a077..fdb566c08c6 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -25,7 +25,7 @@ short_description: Manages security group rules on Apache CloudStack based cloud description: - Add and remove security group rules. version_added: '2.0' -author: René Moser +author: "René Moser (@resmo)" options: security_group: description: From dbdf70485a065f7e0558670559ea21e71b8df34e Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:12:50 -0400 Subject: [PATCH 237/720] Adding author's github id --- cloud/cloudstack/cs_sshkeypair.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 1f1aa2c1fdd..8c38603ba1c 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -27,7 +27,7 @@ description: - If no key was found and no public key was provided and a new SSH private/public key pair will be created and the private key will be returned. version_added: '2.0' -author: René Moser +author: "René Moser (@resmo)" options: name: description: From d6781a5c8910e4bcddd74711d9320ce34e309faa Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:13:09 -0400 Subject: [PATCH 238/720] Adding author's github id --- cloud/cloudstack/cs_vmsnapshot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index 5a476e30795..868af820f99 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -25,7 +25,7 @@ short_description: Manages VM snapshots on Apache CloudStack based clouds. description: - Create, remove and revert VM from snapshots. 
version_added: '2.0' -author: René Moser +author: "René Moser (@resmo)" options: name: description: From a0ce79250d61f6aea46bd2fb4e7fa78912b95ca2 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:14:35 -0400 Subject: [PATCH 239/720] Adding author's github id --- cloud/google/gce_img.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/google/gce_img.py b/cloud/google/gce_img.py index ae4c31dc4c5..466a0580721 100644 --- a/cloud/google/gce_img.py +++ b/cloud/google/gce_img.py @@ -81,7 +81,7 @@ options: requirements: - "python >= 2.6" - "apache-libcloud" -author: Peter Tan +author: "Peter Tan (@tanpeter)" ''' EXAMPLES = ''' From 9180538277bee6cdfd0f848f53905bb0712a92fc Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:15:37 -0400 Subject: [PATCH 240/720] Adding author's github id --- cloud/lxc/lxc_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index b36b60e5ba2..74c55357501 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -26,7 +26,7 @@ short_description: Manage LXC Containers version_added: 1.8.0 description: - Management of LXC containers -author: Kevin Carter +author: "Kevin Carter (@cloudnull)" options: name: description: From 3e6e179b1b5acbde4c96ffe1736cc3a8c209a06a Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:16:30 -0400 Subject: [PATCH 241/720] Adding author's github id --- cloud/misc/ovirt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/misc/ovirt.py b/cloud/misc/ovirt.py index 258637ddaec..183a2394708 100755 --- a/cloud/misc/ovirt.py +++ b/cloud/misc/ovirt.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: ovirt -author: Vincent Van der Kussen +author: "Vincent Van der Kussen (@vincentvdk)" short_description: oVirt/RHEV platform management description: - allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform From e732b478ef3bc8068d0a0b8ad38ad82cb346c965 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:38:50 -0400 Subject: [PATCH 242/720] Update virt.py --- cloud/misc/virt.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cloud/misc/virt.py b/cloud/misc/virt.py index a2567f586a9..540dddc3ba4 100644 --- a/cloud/misc/virt.py +++ b/cloud/misc/virt.py @@ -58,7 +58,10 @@ options: requirements: - "python >= 2.6" - "libvirt-python" -author: Michael DeHaan, Seth Vidal +author: + - "Ansible Core Team" + - "Michael DeHaan (@mpdehaan)" + - "Seth Vidal (@skvidal)" ''' EXAMPLES = ''' From dfd227f82cbd46b81ab2747140949f51d0a8a53a Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:40:45 -0400 Subject: [PATCH 243/720] Adding author's github id --- cloud/vmware/vmware_datacenter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vmware_datacenter.py b/cloud/vmware/vmware_datacenter.py index 10a6f493bf8..b9101fc2626 100644 --- a/cloud/vmware/vmware_datacenter.py +++ b/cloud/vmware/vmware_datacenter.py @@ -25,7 +25,7 @@ short_description: Manage VMware vSphere Datacenters description: - Manage VMware vSphere Datacenters version_added: 2.0 -author: Joseph Callen +author: "Joseph Callen (@jcpowermac)" notes: - Tested on vSphere 5.5 requirements: From 6f30acd0d26db1162545f3ed6f64d8fdf855ef7b Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:42:46 
-0400 Subject: [PATCH 244/720] Adding author's github id --- clustering/consul.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clustering/consul.py b/clustering/consul.py index 27761094698..1c1fa1ab4e8 100644 --- a/clustering/consul.py +++ b/clustering/consul.py @@ -42,7 +42,7 @@ requirements: - python-consul - requests version_added: "1.9" -author: Steve Gargan (steve.gargan@gmail.com) +author: "Steve Gargan (@sgargan)" options: state: description: From 7ed647d29252d98b6b2cf4ff90bfae4643f74754 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:43:09 -0400 Subject: [PATCH 245/720] Adding author's github id --- clustering/consul_acl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clustering/consul_acl.py b/clustering/consul_acl.py index 57ff795931f..a7fbc16b0ca 100644 --- a/clustering/consul_acl.py +++ b/clustering/consul_acl.py @@ -30,7 +30,7 @@ requirements: - pyhcl - requests version_added: "1.9" -author: Steve Gargan (steve.gargan@gmail.com) +author: "Steve Gargan (@sgargan)" options: mgmt_token: description: From da89e65dc00c9cff4c630cea93c00c5ada8a6522 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:43:28 -0400 Subject: [PATCH 246/720] Adding author's github id --- clustering/consul_kv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clustering/consul_kv.py b/clustering/consul_kv.py index 02d06868946..7855368a8ab 100644 --- a/clustering/consul_kv.py +++ b/clustering/consul_kv.py @@ -32,7 +32,7 @@ requirements: - python-consul - requests version_added: "1.9" -author: Steve Gargan (steve.gargan@gmail.com) +author: "Steve Gargan (@sgargan)" options: state: description: From 609b6d05f306c7f2353ca84a4358ae5007e20b86 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:43:50 -0400 Subject: [PATCH 247/720] Adding author's github id --- clustering/consul_session.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clustering/consul_session.py b/clustering/consul_session.py index 6f3a611a642..278cb4e26ed 100644 --- a/clustering/consul_session.py +++ b/clustering/consul_session.py @@ -30,7 +30,7 @@ requirements: - python-consul - requests version_added: "1.9" -author: Steve Gargan (steve.gargan@gmail.com) +author: "Steve Gargan (@sgargan)" options: state: description: From cdc51aa0787d46e62f3166c9d1c82903dbaa9836 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:45:05 -0400 Subject: [PATCH 248/720] Adding author's github id --- database/misc/mongodb_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index 3a3cf4dfff1..cb4b530b646 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -91,7 +91,7 @@ notes: - Requires the pymongo Python package on the remote host, version 2.4.2+. This can be installed using pip or the OS package manager. 
@see http://api.mongodb.org/python/current/installation.html requirements: [ "pymongo" ] -author: Elliott Foster +author: "Elliott Foster (@elliotttf)" ''' EXAMPLES = ''' From 124b7eca324f083ebb3176ba34f519796a228083 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:46:08 -0400 Subject: [PATCH 249/720] Adding author's github id --- database/misc/redis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/misc/redis.py b/database/misc/redis.py index eb9654631e7..42e364a8e61 100644 --- a/database/misc/redis.py +++ b/database/misc/redis.py @@ -98,7 +98,7 @@ notes: this needs to be in the redis.conf in the masterauth variable requirements: [ redis ] -author: Xabier Larrakoetxea +author: "Xabier Larrakoetxea (@slok)" ''' EXAMPLES = ''' From 51ffa51f113188432b9267c75ab53a49faf50f74 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:52:29 -0400 Subject: [PATCH 250/720] Adding author's github id --- database/misc/riak.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/database/misc/riak.py b/database/misc/riak.py index b30e7dc485d..f6c0d64ee42 100644 --- a/database/misc/riak.py +++ b/database/misc/riak.py @@ -26,6 +26,9 @@ description: - This module can be used to join nodes to a cluster, check the status of the cluster. version_added: "1.2" +author: + - "James Martin (@jsmartin)" + - "Drew Kerrigan (@drewkerrigan)" options: command: description: From b8a7f56b14f612df636907e4b3b0f30cb15919e6 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:55:28 -0400 Subject: [PATCH 251/720] Update mysql_replication.py --- database/mysql/mysql_replication.py | 1 + 1 file changed, 1 insertion(+) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index 96d5be0ce39..2bad4c1439e 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -30,6 +30,7 @@ short_description: Manage MySQL replication description: - Manages MySQL server replication, slave, master status get and change master host. version_added: "1.3" +author: "Balazs Pocze (@banyek)" options: mode: description: From d93a6b4c8ec58268be19def4718d2cdb4fcf78a5 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 14:56:24 -0400 Subject: [PATCH 252/720] Adding author's github id --- database/postgresql/postgresql_ext.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/postgresql/postgresql_ext.py b/database/postgresql/postgresql_ext.py index d70107a4cf9..07ed48e9d03 100644 --- a/database/postgresql/postgresql_ext.py +++ b/database/postgresql/postgresql_ext.py @@ -65,7 +65,7 @@ notes: - This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module. 
requirements: [ psycopg2 ] -author: Daniel Schep +author: "Daniel Schep (@dschep)" ''' EXAMPLES = ''' From dd10475a4e0e16585b508ca65ae48dccf5c9f3bf Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:02:33 -0400 Subject: [PATCH 253/720] Adding author's github id --- database/postgresql/postgresql_lang.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/postgresql/postgresql_lang.py b/database/postgresql/postgresql_lang.py index ec0507b5508..f3b1baa4d9a 100644 --- a/database/postgresql/postgresql_lang.py +++ b/database/postgresql/postgresql_lang.py @@ -95,7 +95,7 @@ notes: systems, install the postgresql, libpq-dev, and python-psycopg2 packages on the remote host before using this module. requirements: [ psycopg2 ] -author: Jens Depuydt +author: "Jens Depuydt (@jensdepuydt)" ''' EXAMPLES = ''' From 0fca8e4e3113e7367b113786df8c8f652c590c41 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:04:17 -0400 Subject: [PATCH 254/720] Adding author's github id --- database/vertica/vertica_configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/vertica/vertica_configuration.py b/database/vertica/vertica_configuration.py index ad74c0f23f2..ed75667b139 100644 --- a/database/vertica/vertica_configuration.py +++ b/database/vertica/vertica_configuration.py @@ -67,7 +67,7 @@ notes: and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] -author: Dariusz Owczarek +author: "Dariusz Owczarek (@dareko)" """ EXAMPLES = """ From a4effefb33586ca73a07324470715b59abe80f1c Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:04:34 -0400 Subject: [PATCH 255/720] Adding author's github id --- database/vertica/vertica_facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/vertica/vertica_facts.py b/database/vertica/vertica_facts.py index b7e0ac4ad5a..705b74a04f5 100644 --- a/database/vertica/vertica_facts.py +++ b/database/vertica/vertica_facts.py @@ -59,7 +59,7 @@ notes: and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] -author: Dariusz Owczarek +author: "Dariusz Owczarek (@dareko)" """ EXAMPLES = """ From 5eea108e2c030cb848a0188d8113efea8605f380 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:04:53 -0400 Subject: [PATCH 256/720] Adding author's github id --- database/vertica/vertica_role.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/vertica/vertica_role.py b/database/vertica/vertica_role.py index ef56a58a866..b7a0a5d66ef 100644 --- a/database/vertica/vertica_role.py +++ b/database/vertica/vertica_role.py @@ -75,7 +75,7 @@ notes: and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
requirements: [ 'unixODBC', 'pyodbc' ] -author: Dariusz Owczarek +author: "Dariusz Owczarek (@dareko)" """ EXAMPLES = """ From 7107da12e24bd062a344a7cfd94dfbda8fce8a27 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:05:17 -0400 Subject: [PATCH 257/720] Adding author's github id --- database/vertica/vertica_schema.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/vertica/vertica_schema.py b/database/vertica/vertica_schema.py index d0ed2ce05b0..39ccb0b60e8 100644 --- a/database/vertica/vertica_schema.py +++ b/database/vertica/vertica_schema.py @@ -91,7 +91,7 @@ notes: and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] -author: Dariusz Owczarek +author: "Dariusz Owczarek (@dareko)" """ EXAMPLES = """ From 56cf538b716911d6a83b1c560c17b998436579fb Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:06:01 -0400 Subject: [PATCH 258/720] Adding author's github id --- database/vertica/vertica_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/vertica/vertica_user.py b/database/vertica/vertica_user.py index 576e8b887ef..7c52df3163a 100644 --- a/database/vertica/vertica_user.py +++ b/database/vertica/vertica_user.py @@ -107,7 +107,7 @@ notes: and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] -author: Dariusz Owczarek +author: "Dariusz Owczarek (@dareko)" """ EXAMPLES = """ From 9677437a6e3ec547872aaf3b36f400e38e09f1bb Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:10:16 -0400 Subject: [PATCH 259/720] Adding author's github id --- files/patch.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/files/patch.py b/files/patch.py index e0d7d1d335d..809069c9bac 100755 --- a/files/patch.py +++ b/files/patch.py @@ -22,7 +22,9 @@ DOCUMENTATION = ''' --- module: patch -author: Luis Alberto Perez Lazaro, Jakub Jirutka +author: + - "Jakub Jirutka (@jirutka)" + - "Luis Alberto Perez Lazaro (@luisperlaz)" version_added: 1.9 description: - Apply patch files using the GNU patch tool. 
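These documentation-only changes are not purely cosmetic: Ansible parses each DOCUMENTATION string as YAML, so the author field may legitimately be a plain string or a list of strings, while an option name missing its colon (the problem fixed in PATCH 224 above) is a parse error rather than a typo. A small sketch, assuming PyYAML is installed, of how the two author forms and a colon-less option block behave:

import yaml  # assumption: PyYAML is available in the environment

single = yaml.safe_load('author: "Jakub Jirutka (@jirutka)"')
multi = yaml.safe_load(
    'author:\n'
    '  - "Jakub Jirutka (@jirutka)"\n'
    '  - "Luis Alberto Perez Lazaro (@luisperlaz)"\n'
)
print(type(single["author"]).__name__)   # str
print(type(multi["author"]).__name__)    # list

# An option name without the trailing colon cannot start a mapping entry,
# which is why the missing colons had to be added.
broken = 'options:\n  public_port\n    description:\n      - Start public port.\n'
try:
    yaml.safe_load(broken)
except yaml.YAMLError as exc:
    print("missing colon ->", type(exc).__name__)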
From 9259ff62ddd0c47a52518a1df079f31ce4431749 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:11:57 -0400 Subject: [PATCH 260/720] Adding author's github id --- messaging/rabbitmq_parameter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/messaging/rabbitmq_parameter.py b/messaging/rabbitmq_parameter.py index 2f78bd4ee15..85b29cdfd10 100644 --- a/messaging/rabbitmq_parameter.py +++ b/messaging/rabbitmq_parameter.py @@ -25,7 +25,7 @@ short_description: Adds or removes parameters to RabbitMQ description: - Manage dynamic, cluster-wide parameters for RabbitMQ version_added: "1.1" -author: Chris Hoffman +author: "Chris Hoffman (@chrishoffman)" options: component: description: From c94776fa36402b2ada90f4b4103801e376f33602 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:12:15 -0400 Subject: [PATCH 261/720] Adding author's github id --- messaging/rabbitmq_plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/messaging/rabbitmq_plugin.py b/messaging/rabbitmq_plugin.py index 53c38f978d5..8fa1f3d0f17 100644 --- a/messaging/rabbitmq_plugin.py +++ b/messaging/rabbitmq_plugin.py @@ -25,7 +25,7 @@ short_description: Adds or removes plugins to RabbitMQ description: - Enables or disables RabbitMQ plugins version_added: "1.1" -author: Chris Hoffman +author: "Chris Hoffman (@chrishoffman)" options: names: description: From db14acbb5414220363f482d52ae153d925689bc0 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:13:02 -0400 Subject: [PATCH 262/720] Adding author's github id --- messaging/rabbitmq_policy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/messaging/rabbitmq_policy.py b/messaging/rabbitmq_policy.py index 800c3822d55..97a800d854f 100644 --- a/messaging/rabbitmq_policy.py +++ b/messaging/rabbitmq_policy.py @@ -26,7 +26,7 @@ short_description: Manage the state of policies in RabbitMQ. description: - Manage the state of a virtual host in RabbitMQ. 
version_added: "1.5" -author: John Dewey +author: "John Dewey (@retr0h)" options: name: description: From 994ea305a8dd030a1379657a199a114b353b5208 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:13:45 -0400 Subject: [PATCH 263/720] Adding author's github id --- messaging/rabbitmq_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/messaging/rabbitmq_user.py b/messaging/rabbitmq_user.py index f494ce802d9..41433c05190 100644 --- a/messaging/rabbitmq_user.py +++ b/messaging/rabbitmq_user.py @@ -25,7 +25,7 @@ short_description: Adds or removes users to RabbitMQ description: - Add or remove users to RabbitMQ and assign permissions version_added: "1.1" -author: Chris Hoffman +author: "Chris Hoffman (@chrishoffman)" options: user: description: From 2f9f6cc28ef02f32bc8c36a221e25f52e623e244 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:14:04 -0400 Subject: [PATCH 264/720] Adding author's github id --- messaging/rabbitmq_vhost.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/messaging/rabbitmq_vhost.py b/messaging/rabbitmq_vhost.py index fd4b04a683f..d1bee397caf 100644 --- a/messaging/rabbitmq_vhost.py +++ b/messaging/rabbitmq_vhost.py @@ -26,7 +26,7 @@ short_description: Manage the state of a virtual host in RabbitMQ description: - Manage the state of a virtual host in RabbitMQ version_added: "1.1" -author: Chris Hoffman +author: "Chris Hoffman (@choffman)" options: name: description: From 6498940461774530612aee772790e4ef7c7da089 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:16:13 -0400 Subject: [PATCH 265/720] Adding author's github id --- monitoring/airbrake_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/airbrake_deployment.py b/monitoring/airbrake_deployment.py index e1c490b881b..c3686ae699b 100644 --- a/monitoring/airbrake_deployment.py +++ b/monitoring/airbrake_deployment.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: airbrake_deployment version_added: "1.2" -author: Bruce Pennypacker +author: "Bruce Pennypacker (@bpennypacker)" short_description: Notify airbrake about app deployments description: - Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking) From 9a542327dd564cd3aaa205e80b6f302352f9eb8d Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:18:01 -0400 Subject: [PATCH 266/720] Adding author's github id --- monitoring/bigpanda.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/bigpanda.py b/monitoring/bigpanda.py index b1a45145ede..3bed44893b7 100644 --- a/monitoring/bigpanda.py +++ b/monitoring/bigpanda.py @@ -3,7 +3,7 @@ DOCUMENTATION = ''' --- module: bigpanda -author: BigPanda +author: "Hagai Kariti (@hkariti)" short_description: Notify BigPanda about deployments version_added: "1.8" description: From 6315cbe65334e7036fdaaa2cf7cd80ab5f07a623 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:19:22 -0400 Subject: [PATCH 267/720] Adding author's github id --- monitoring/boundary_meter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/boundary_meter.py b/monitoring/boundary_meter.py index f6d84328597..daab4820408 100644 --- a/monitoring/boundary_meter.py +++ b/monitoring/boundary_meter.py @@ -34,7 +34,7 @@ short_description: Manage boundary meters description: - This module manages boundary meters version_added: "1.3" -author: curtis@serverascode.com +author: 
"curtis (@ccollicutt)" requirements: - Boundary API access - bprobe is required to send data, but not to register a meter From 324a501d1c64da24f401404c9815536dc7b346d2 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:20:02 -0400 Subject: [PATCH 268/720] Adding author's github id --- monitoring/circonus_annotation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/circonus_annotation.py b/monitoring/circonus_annotation.py index e2b6d8d79ed..1585cd8080a 100644 --- a/monitoring/circonus_annotation.py +++ b/monitoring/circonus_annotation.py @@ -12,7 +12,7 @@ module: circonus_annotation short_description: create an annotation in circonus description: - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided -author: Nick Harring +author: "Nick Harring (@NickatEpic)" version_added: 2.0 requirements: - urllib3 From 593e6cb27c986516156facd5b08a85a60672eaad Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:21:13 -0400 Subject: [PATCH 269/720] Adding author's github id --- monitoring/datadog_event.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index 5d38dd4c31d..ed5439ac061 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -14,7 +14,7 @@ description: - "Allows to post events to DataDog (www.datadoghq.com) service." - "Uses http://docs.datadoghq.com/api/#events API." version_added: "1.3" -author: Artūras 'arturaz' Šlajus +author: "Artūras 'arturaz' Šlajus (@arturaz)" notes: [] requirements: [urllib2] options: From 26d6e0c6a7afe1971541ec4709ec00b6e38dbddb Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:22:11 -0400 Subject: [PATCH 270/720] Adding author's github id --- monitoring/librato_annotation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/librato_annotation.py b/monitoring/librato_annotation.py index 161caebb07c..88d3bb81f7b 100644 --- a/monitoring/librato_annotation.py +++ b/monitoring/librato_annotation.py @@ -29,7 +29,7 @@ short_description: create an annotation in librato description: - Create an annotation event on the given annotation stream :name. 
If the annotation stream does not exist, it will be created automatically version_added: "1.6" -author: Seth Edwards +author: "Seth Edwards (@sedward)" requirements: - urllib2 - base64 From 415f22dffd2b47b132daf50a7ec668072d0e5ad3 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:34:38 -0400 Subject: [PATCH 271/720] Adding author's github id --- monitoring/logentries.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/logentries.py b/monitoring/logentries.py index a19885ea702..1d511dc4e4a 100644 --- a/monitoring/logentries.py +++ b/monitoring/logentries.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: logentries -author: Ivan Vanderbyl +author: "Ivan Vanderbyl (@ivanvanderbyl)" short_description: Module for tracking logs via logentries.com description: - Sends logs to LogEntries in realtime From 396b9d921a248baf7aa431e1947859cc0706c038 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:35:25 -0400 Subject: [PATCH 272/720] Add --- monitoring/monit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/monit.py b/monitoring/monit.py index 8772d22b2d8..52e56bfc44e 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -39,7 +39,7 @@ options: default: null choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ] requirements: [ ] -author: Darryl Stoflet +author: "Darryl Stoflet (@dstoflet)" ''' EXAMPLES = ''' From bbfd46c32acd93bf12693db03fcce23bf6f51529 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:36:21 -0400 Subject: [PATCH 273/720] Adding author's github id --- monitoring/nagios.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index c564e712b04..bf2fded7577 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -73,7 +73,7 @@ options: required: true default: null -author: Tim Bielawa +author: "Tim Bielawa (@tbielawa)" requirements: [ "Nagios" ] ''' From a446b69b36d5d8a2a6f537b6df00e7df9fd37d65 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:37:16 -0400 Subject: [PATCH 274/720] Adding author's github id --- monitoring/newrelic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/newrelic_deployment.py b/monitoring/newrelic_deployment.py index 93d55832fd3..27e6f52b8e6 100644 --- a/monitoring/newrelic_deployment.py +++ b/monitoring/newrelic_deployment.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: newrelic_deployment version_added: "1.2" -author: Matt Coddington +author: "Matt Coddington (@mcodd)" short_description: Notify newrelic about app deployments description: - Notify newrelic about app deployments (see http://newrelic.github.io/newrelic_api/NewRelicApi/Deployment.html) From 10d798e0f3e20a7e9bc8970c15983968be4e88d4 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:48:15 -0400 Subject: [PATCH 275/720] Adding author's github id --- monitoring/pagerduty.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/monitoring/pagerduty.py b/monitoring/pagerduty.py index aa6903414dd..7deca7e19b0 100644 --- a/monitoring/pagerduty.py +++ b/monitoring/pagerduty.py @@ -7,7 +7,10 @@ short_description: Create PagerDuty maintenance windows description: - This module will let you create PagerDuty maintenance windows version_added: "1.2" -author: Justin Johns +author: + - Andrew Newdigate (@suprememoocow) + - Dylan Silva (@thaumos) + - 
Justin Johns requirements: - PagerDuty API access options: From d3615bc8d41eff86d8501c4c7844bfa7a4aad06c Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:51:08 -0400 Subject: [PATCH 276/720] Adding author's github id --- monitoring/pingdom.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/monitoring/pingdom.py b/monitoring/pingdom.py index 0ae1af357e0..fd06a1217cb 100644 --- a/monitoring/pingdom.py +++ b/monitoring/pingdom.py @@ -7,7 +7,9 @@ short_description: Pause/unpause Pingdom alerts description: - This module will let you pause/unpause Pingdom alerts version_added: "1.2" -author: Justin Johns +author: + - "Dylan Silva (@thaumos)" + - "Justin Johns" requirements: - "This pingdom python library: https://github.com/mbabineau/pingdom-python" options: From f7a616f202108991a09ea700129bbd8982905596 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:51:34 -0400 Subject: [PATCH 277/720] Formatting fix in author list --- monitoring/pagerduty.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/monitoring/pagerduty.py b/monitoring/pagerduty.py index 7deca7e19b0..24c622c83a8 100644 --- a/monitoring/pagerduty.py +++ b/monitoring/pagerduty.py @@ -8,9 +8,9 @@ description: - This module will let you create PagerDuty maintenance windows version_added: "1.2" author: - - Andrew Newdigate (@suprememoocow) - - Dylan Silva (@thaumos) - - Justin Johns + - "Andrew Newdigate (@suprememoocow)" + - "Dylan Silva (@thaumos)" + - "Justin Johns" requirements: - PagerDuty API access options: From 0738aafebd9d860e079da3fcb7e8fd60865252ec Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:53:59 -0400 Subject: [PATCH 278/720] Adding author's github id --- monitoring/rollbar_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/rollbar_deployment.py b/monitoring/rollbar_deployment.py index 772e78fc5c2..5a643697e5b 100644 --- a/monitoring/rollbar_deployment.py +++ b/monitoring/rollbar_deployment.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: rollbar_deployment version_added: 1.6 -author: Max Riveiro +author: "Max Riveiro (@kavu)" short_description: Notify Rollbar about app deployments description: - Notify Rollbar about app deployments From afc4e16551b9eb7bdc0f686ad8236e4570db03cd Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:54:35 -0400 Subject: [PATCH 279/720] Adding author's github id --- monitoring/stackdriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/stackdriver.py b/monitoring/stackdriver.py index c36964dd9d2..570e6659ac0 100644 --- a/monitoring/stackdriver.py +++ b/monitoring/stackdriver.py @@ -8,7 +8,7 @@ short_description: Send code deploy and annotation events to stackdriver description: - Send code deploy and annotation events to Stackdriver version_added: "1.6" -author: Ben Whaley +author: "Ben Whaley (@bwhaley)" options: key: description: From 0f5d8abdc9d847984d2936f542eb42e5d89d6cbf Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 15:55:15 -0400 Subject: [PATCH 280/720] Adding author's github id --- monitoring/uptimerobot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/uptimerobot.py b/monitoring/uptimerobot.py index 889d144c9b3..6d5c9c7bac0 100644 --- a/monitoring/uptimerobot.py +++ b/monitoring/uptimerobot.py @@ -6,7 +6,7 @@ module: uptimerobot short_description: Pause and start Uptime Robot monitoring description: - This 
module will let you start and pause Uptime Robot Monitoring -author: Nate Kingsley +author: "Nate Kingsley (@nate-kingsley)" version_added: "1.9" requirements: - Valid Uptime Robot API Key From 13200d4c787c6598a9b156b1d3f3a4a244fbb20e Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 16:06:43 -0400 Subject: [PATCH 281/720] Adding author's github id --- monitoring/zabbix_group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py index d338267e370..73aa9400a6d 100644 --- a/monitoring/zabbix_group.py +++ b/monitoring/zabbix_group.py @@ -64,7 +64,7 @@ options: required: true notes: - The module has been tested with Zabbix Server 2.2. -author: René Moser +author: "René Moser (@resmo)" ''' EXAMPLES = ''' From 81b696ca9766082dc02fc542cf8390e9193d57fe Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 16:11:01 -0400 Subject: [PATCH 282/720] Adding author's github id --- monitoring/zabbix_host.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 5798e663355..f97680f1502 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -26,7 +26,9 @@ short_description: Zabbix host creates/updates/deletes description: - This module allows you to create, modify and delete Zabbix host entries and associated group and template data. version_added: "2.0" -author: Tony Minfei Ding, Harrison Gu +author: + - "Tony Minfei Ding" + - "Harrison Gu (@harrisongu)" requirements: - "python >= 2.6" - zabbix-api From def8ae9c475753091313bf2c07b60962049b2c3a Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 16:13:31 -0400 Subject: [PATCH 283/720] Adding author's github id ...though the real author is perhaps a mystery. --- monitoring/zabbix_host.py | 1 + 1 file changed, 1 insertion(+) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index f97680f1502..2d6ce04b830 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -27,6 +27,7 @@ description: - This module allows you to create, modify and delete Zabbix host entries and associated group and template data. version_added: "2.0" author: + - "(@cove)" - "Tony Minfei Ding" - "Harrison Gu (@harrisongu)" requirements: From feb71e82e370e0424b1e638342ab0c8a76985f19 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 16:14:21 -0400 Subject: [PATCH 284/720] Adding author's github id --- monitoring/zabbix_maintenance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/zabbix_maintenance.py b/monitoring/zabbix_maintenance.py index f59149fd07a..859e79dc8f2 100644 --- a/monitoring/zabbix_maintenance.py +++ b/monitoring/zabbix_maintenance.py @@ -26,7 +26,7 @@ short_description: Create Zabbix maintenance windows description: - This module will let you create Zabbix maintenance windows. 
version_added: "1.8" -author: Alexander Bulimov +author: "Alexander Bulimov (@abulimov)" requirements: - "python >= 2.6" - zabbix-api From a00065329271ff0d1ca50d58e7ff45f50f671d82 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 16:15:54 -0400 Subject: [PATCH 285/720] Adding author's github id --- monitoring/zabbix_hostmacro.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py index 7869d55c315..a8649454025 100644 --- a/monitoring/zabbix_hostmacro.py +++ b/monitoring/zabbix_hostmacro.py @@ -26,7 +26,9 @@ short_description: Zabbix host macro creates/updates/deletes description: - manages Zabbix host macros, it can create, update or delete them. version_added: "2.0" -author: Dean Hailin Song +author: + - "(@cave)" + - Dean Hailin Song requirements: - "python >= 2.6" - zabbix-api From 66c83e7ae318a7e694d951bb4e0dcf7380efc06f Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 16:16:54 -0400 Subject: [PATCH 286/720] Adding author's github id --- monitoring/zabbix_screen.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py index aa895af0bbd..932681617f5 100644 --- a/monitoring/zabbix_screen.py +++ b/monitoring/zabbix_screen.py @@ -27,7 +27,10 @@ short_description: Zabbix screen creates/updates/deletes description: - This module allows you to create, modify and delete Zabbix screens and associated graph data. version_added: "2.0" -author: Tony Minfei Ding, Harrison Gu +author: + - "(@cove)" + - "Tony Minfei Ding" + - "Harrison Gu (@harrisongu)" requirements: - "python >= 2.6" - zabbix-api From fab87ebb48234e3af62254d9cef0375b92d7c754 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 17:24:32 -0400 Subject: [PATCH 287/720] Adding author's github id --- network/a10/a10_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/a10/a10_server.py b/network/a10/a10_server.py index 65410536eef..58cda6c402b 100644 --- a/network/a10/a10_server.py +++ b/network/a10/a10_server.py @@ -28,7 +28,7 @@ version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices description: - Manage slb server objects on A10 Networks devices via aXAPI -author: Mischa Peters +author: "Mischa Peters (@mischapeters)" notes: - Requires A10 Networks aXAPI 2.1 options: From 06dc55bd5f67d4393f7f8d212b0ebbf5938873a8 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 17:25:02 -0400 Subject: [PATCH 288/720] Adding author's github id --- network/a10/a10_service_group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/a10/a10_service_group.py b/network/a10/a10_service_group.py index 3627e2d12b8..277021bce33 100644 --- a/network/a10/a10_service_group.py +++ b/network/a10/a10_service_group.py @@ -28,7 +28,7 @@ version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices description: - Manage slb service-group objects on A10 Networks devices via aXAPI -author: Mischa Peters +author: "Mischa Peters (@mischapeters)" notes: - Requires A10 Networks aXAPI 2.1 - When a server doesn't exist and is added to the service-group the server will be created From a6cd9865337099e8f5f682b856dc9560bd3566ae Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 17:25:25 -0400 Subject: [PATCH 289/720] Adding author's github id --- network/a10/a10_virtual_server.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/network/a10/a10_virtual_server.py b/network/a10/a10_virtual_server.py index 3d807c098cf..1387813c91d 100644 --- a/network/a10/a10_virtual_server.py +++ b/network/a10/a10_virtual_server.py @@ -28,7 +28,7 @@ version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices description: - Manage slb virtual server objects on A10 Networks devices via aXAPI -author: Mischa Peters +author: "Mischa Peters (@mischapeters)" notes: - Requires A10 Networks aXAPI 2.1 requirements: From c859c8a61df1c1cf459a451ae5966d7b9061b98b Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 17:27:49 -0400 Subject: [PATCH 290/720] Adding author's github id --- network/citrix/netscaler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/citrix/netscaler.py b/network/citrix/netscaler.py index b2f87aa0d08..a23eef15c65 100644 --- a/network/citrix/netscaler.py +++ b/network/citrix/netscaler.py @@ -82,7 +82,7 @@ options: choices: ['yes', 'no'] requirements: [ "urllib", "urllib2" ] -author: Nandor Sivok +author: "Nandor Sivok (@dominis)" ''' EXAMPLES = ''' From b8e648536a3a99ce28fe760fc6795ad5e740e610 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 17:28:43 -0400 Subject: [PATCH 291/720] Adding author's github id --- network/f5/bigip_facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index f74c66b6036..0ba650dcdb9 100755 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -25,7 +25,7 @@ short_description: "Collect facts from F5 BIG-IP devices" description: - "Collect facts from F5 BIG-IP devices via iControl SOAP API" version_added: "1.6" -author: Matt Hite +author: "Matt Hite (@mhite) " notes: - "Requires BIG-IP software version >= 11.4" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" From 321ad3b474d0b75793eb94c3b90f19d8cd79cf30 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 17:29:08 -0400 Subject: [PATCH 292/720] Adding author's github id --- network/f5/bigip_facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 0ba650dcdb9..3ea92ef0049 100755 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -25,7 +25,7 @@ short_description: "Collect facts from F5 BIG-IP devices" description: - "Collect facts from F5 BIG-IP devices via iControl SOAP API" version_added: "1.6" -author: "Matt Hite (@mhite) " +author: "Matt Hite (@mhite)" notes: - "Requires BIG-IP software version >= 11.4" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" From 2440e230e5cb17538e49e947f1a2c5a94c2f0d98 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 17:30:26 -0400 Subject: [PATCH 293/720] Adding author's github id --- network/f5/bigip_monitor_http.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py index d131eb71eee..1fabc5ebd63 100644 --- a/network/f5/bigip_monitor_http.py +++ b/network/f5/bigip_monitor_http.py @@ -27,7 +27,7 @@ short_description: "Manages F5 BIG-IP LTM http monitors" description: - "Manages F5 BIG-IP LTM monitors via iControl SOAP API" version_added: "1.4" -author: Serge van Ginderachter +author: "Serge van Ginderachter (@srvg)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required 
(see http://devcentral.f5.com)" From 9800126a6e19154831c784fb1384071371f99d51 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 18:03:06 -0400 Subject: [PATCH 294/720] Adding author's github id --- network/f5/bigip_monitor_tcp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py index 5cc00fe6b68..95a35ed61c8 100644 --- a/network/f5/bigip_monitor_tcp.py +++ b/network/f5/bigip_monitor_tcp.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM tcp monitors" description: - "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API" version_added: "1.4" -author: Serge van Ginderachter +author: "Serge van Ginderachter (@srvg)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" From 7ef07893ade1312b65c41d57e836f438471477d1 Mon Sep 17 00:00:00 2001 From: Willy Barro Date: Wed, 13 May 2015 19:13:38 -0300 Subject: [PATCH 295/720] Remove changed=True from pushbullet module By convention, notification modules should not return changed=True --- notification/pushbullet.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/notification/pushbullet.py b/notification/pushbullet.py index d89c79ec941..ffc2b32232b 100644 --- a/notification/pushbullet.py +++ b/notification/pushbullet.py @@ -24,7 +24,7 @@ module: pushbullet short_description: Sends notifications to Pushbullet description: - This module sends push notifications via Pushbullet to channels or devices. -version_added: "1.8" +version_added: "2.0" options: api_key: description: @@ -164,7 +164,7 @@ def main(): success, result = target.push_note(title, body) if success: - module.exit_json(changed=True, msg="OK") + module.exit_json(changed=False, msg="OK") # General failure module.fail_json(msg="Some error ocurred, Pushbullet response: %s" % (result)) From 318983ee5366ec1846634fc1652c1c43d7fbf8c3 Mon Sep 17 00:00:00 2001 From: Willy Barro Date: Wed, 13 May 2015 19:22:46 -0300 Subject: [PATCH 296/720] Replace manual option check to mutually_exclusive AnsibleModule option Also return the message "OK" when in check mode to use the same message as the original success return. --- notification/pushbullet.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/notification/pushbullet.py b/notification/pushbullet.py index ffc2b32232b..5b255b4b549 100644 --- a/notification/pushbullet.py +++ b/notification/pushbullet.py @@ -114,6 +114,9 @@ def main(): title = dict(type='str', required=True), body = dict(type='str', default=None) ), + mutually_exclusive = ( + ['channel', 'device'], + ), supports_check_mode=True ) @@ -132,9 +135,6 @@ def main(): target = None # Checks for channel/device - if device is not None and channel is not None: - module.fail_json(msg="You can't use both device and channel at the same time.") - if device is None and channel is None: module.fail_json(msg="You need to provide a channel or a device.") @@ -158,7 +158,7 @@ def main(): # If in check mode, exit saying that we succeeded if module.check_mode: - module.exit_json(changed=False) + module.exit_json(changed=False, msg="OK") # Send push notification success, result = target.push_note(title, body) From 3f2c4426ae7f234dfc1927a2c6507e42e4438261 Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Wed, 13 May 2015 23:28:11 +0100 Subject: [PATCH 297/720] Added GPL license + Don't log password Implemented resmo suggestions. 
--- messaging/rabbitmq_binding.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py index 731e52ba969..2d34f0c71bf 100644 --- a/messaging/rabbitmq_binding.py +++ b/messaging/rabbitmq_binding.py @@ -1,9 +1,29 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Manuel Sousa +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# DOCUMENTATION = ''' +--- module: rabbitmq_binding author: Manuel Sousa -version_added: 2.0 +version_added: "2.0" short_description: This module manages rabbitMQ bindings description: @@ -89,7 +109,7 @@ def main(): state = dict(default='present', choices=['present', 'absent'], type='str'), name = dict(required=True, aliases=[ "src", "source" ], type='str'), login_user = dict(default='guest', type='str'), - login_password = dict(default='guest', type='str'), + login_password = dict(default='guest', type='str', no_log=True), login_host = dict(default='localhost', type='str'), login_port = dict(default='15672', type='str'), vhost = dict(default='/', type='str'), From 180fb13b2849126024228750193188e60afad7cb Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Wed, 13 May 2015 23:36:41 +0100 Subject: [PATCH 298/720] Added GPL license + Don't log password Implemented resmo suggestions. --- messaging/rabbitmq_exchange.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py index b7dbd00be04..f184acc024e 100644 --- a/messaging/rabbitmq_exchange.py +++ b/messaging/rabbitmq_exchange.py @@ -1,9 +1,29 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Manuel Sousa +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# DOCUMENTATION = ''' +--- module: rabbitmq_exchange author: Manuel Sousa -version_added: 2.0 +version_added: "2.0" short_description: This module manages rabbitMQ exchanges description: @@ -95,7 +115,7 @@ def main(): state = dict(default='present', choices=['present', 'absent'], type='str'), name = dict(required=True, type='str'), login_user = dict(default='guest', type='str'), - login_password = dict(default='guest', type='str'), + login_password = dict(default='guest', type='str', no_log=True), login_host = dict(default='localhost', type='str'), login_port = dict(default='15672', type='str'), vhost = dict(default='/', type='str'), From 2fa269b01964fb8b447098684e8979b11a7f0840 Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Wed, 13 May 2015 23:38:53 +0100 Subject: [PATCH 299/720] Added GPL license + Don't log password Implemented resmo suggestions. --- messaging/rabbitmq_queue.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/messaging/rabbitmq_queue.py b/messaging/rabbitmq_queue.py index 34d107e3856..ac5743fef55 100644 --- a/messaging/rabbitmq_queue.py +++ b/messaging/rabbitmq_queue.py @@ -1,9 +1,29 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Manuel Sousa +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# DOCUMENTATION = ''' +--- module: rabbitmq_queue author: Manuel Sousa -version_added: 2.0 +version_added: "2.0" short_description: This module manages rabbitMQ queues description: @@ -104,7 +124,7 @@ def main(): state = dict(default='present', choices=['present', 'absent'], type='str'), name = dict(required=True, type='str'), login_user = dict(default='guest', type='str'), - login_password = dict(default='guest', type='str'), + login_password = dict(default='guest', type='str', no_log=True), login_host = dict(default='localhost', type='str'), login_port = dict(default='15672', type='str'), vhost = dict(default='/', type='str'), From 4996f6461276fcb2fd0507720ecd25f709c11d81 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 19:12:39 -0400 Subject: [PATCH 300/720] Adding author's github id --- network/f5/bigip_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index ca212763881..a24c5df449e 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM nodes" description: - "Manages F5 BIG-IP LTM nodes via iControl SOAP API" version_added: "1.4" -author: Matt Hite +author: "Matt Hite (@mhite)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" From a564122100af33c26f663decdc4427f7c210c9a5 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 19:16:34 -0400 Subject: [PATCH 301/720] Adding author's github id --- network/f5/bigip_pool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py index 425c1e97149..3347ef55861 100644 --- a/network/f5/bigip_pool.py +++ b/network/f5/bigip_pool.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM pools" description: - "Manages F5 BIG-IP LTM pools via iControl SOAP API" version_added: "1.2" -author: Matt Hite +author: "Matt Hite (@mhite)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" From cab4a24653d0d6a0213f228a023551d6dc7c4a4e Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 19:18:03 -0400 Subject: [PATCH 302/720] Adding author's github id --- network/f5/bigip_pool_member.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index 11ae9f35646..f20deb223a5 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM pool members" description: - "Manages F5 BIG-IP LTM pool members via iControl SOAP API" version_added: "1.4" -author: Matt Hite +author: "Matt Hite (@mhite)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" From 3ece7b153e91b976aaa93d04bd70fd0a88d698a4 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 19:19:17 -0400 Subject: [PATCH 303/720] Adding author's github id --- network/dnsimple.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/dnsimple.py b/network/dnsimple.py index 9aa52172f19..5cecfbd8169 100755 --- a/network/dnsimple.py +++ b/network/dnsimple.py @@ -93,7 +93,7 @@ options: default: null requirements: [ dnsimple ] -author: Alex Coomans +author: "Alex Coomans (@drcapulet)" ''' EXAMPLES = ''' From 
6e1077eabc36575a6822be7e2a2c684c357d8bbb Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 19:20:31 -0400 Subject: [PATCH 304/720] Adding author's github id --- network/dnsmadeeasy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index 148e25a5011..dc70d0e5569 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -87,7 +87,7 @@ notes: - This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks. requirements: [ urllib, urllib2, hashlib, hmac ] -author: Brice Burgess +author: "Brice Burgess (@briceburg)" ''' EXAMPLES = ''' From bc64d4654f68a6fda89a97a12613a79ee74b4258 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 19:21:32 -0400 Subject: [PATCH 305/720] Adding author's github id --- network/haproxy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/haproxy.py b/network/haproxy.py index 38757599df5..c897349019e 100644 --- a/network/haproxy.py +++ b/network/haproxy.py @@ -91,7 +91,7 @@ examples: # enable server in 'www' backend pool with change server(s) weight - haproxy: state=enabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock weight=10 backend=www -author: Ravi Bhure +author: "Ravi Bhure (@ravibhure)" ''' import socket From 79ee13b542be975a450eeeb2ff2783edb4ec127e Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 19:22:21 -0400 Subject: [PATCH 306/720] Adding author's github id --- network/lldp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/lldp.py b/network/lldp.py index ea6dc78d7bc..3ed554f79c3 100755 --- a/network/lldp.py +++ b/network/lldp.py @@ -24,7 +24,7 @@ short_description: get details reported by lldp description: - Reads data out of lldpctl options: {} -author: Andy Hill +author: "Andy Hill (@andyhky)" notes: - Requires lldpd running and lldp enabled on switches ''' From 4f40f91b78fe3a4dd40fa5028e7f070b4ab9a67d Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 19:24:01 -0400 Subject: [PATCH 307/720] Adding author's github id --- network/openvswitch_bridge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/openvswitch_bridge.py b/network/openvswitch_bridge.py index 551ca707a2d..6b8119f5875 100644 --- a/network/openvswitch_bridge.py +++ b/network/openvswitch_bridge.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: openvswitch_bridge version_added: 1.4 -author: David Stygstra +author: "David Stygstra (@stygstra)" short_description: Manage Open vSwitch bridges requirements: [ ovs-vsctl ] description: From 4aec44b861a9f28c5ff396aa7a9a9c0fa13a8023 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 19:25:07 -0400 Subject: [PATCH 308/720] Adding author's github id --- network/openvswitch_port.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/openvswitch_port.py b/network/openvswitch_port.py index 66391937d1b..028300d6b70 100644 --- a/network/openvswitch_port.py +++ b/network/openvswitch_port.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: openvswitch_port version_added: 1.4 -author: David Stygstra +author: "David Stygstra (@stygstra)" short_description: Manage Open vSwitch ports requirements: [ ovs-vsctl ] description: From 41589b86437f3b9efa3ab625f29fe9c8d6fdfa0e Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 19:26:18 -0400 Subject: [PATCH 309/720] 
Formatting to author field --- network/snmp_facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/snmp_facts.py b/network/snmp_facts.py index 85fc148cba5..510ffbf9add 100755 --- a/network/snmp_facts.py +++ b/network/snmp_facts.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: snmp_facts version_added: "1.9" -author: Patrick Ogenstad (@networklore) +author: "Patrick Ogenstad (@networklore)" short_description: Retrive facts for a device using SNMP. description: - Retrieve facts for a device using SNMP, the facts will be From 14853a20f31b9cd7196cdb10a7b84168587dbc26 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 19:30:11 -0400 Subject: [PATCH 310/720] Replacing twitter id with github id --- network/snmp_facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/snmp_facts.py b/network/snmp_facts.py index 510ffbf9add..81a91ee6eb2 100755 --- a/network/snmp_facts.py +++ b/network/snmp_facts.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: snmp_facts version_added: "1.9" -author: "Patrick Ogenstad (@networklore)" +author: "Patrick Ogenstad (@ogenstad)" short_description: Retrive facts for a device using SNMP. description: - Retrieve facts for a device using SNMP, the facts will be From b285bb072f5655fa293d12f00651e01c996f27c0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 13 May 2015 19:34:05 -0400 Subject: [PATCH 311/720] Revert "Add enabled/disabled support to bigip_node" --- network/f5/bigip_node.py | 64 ++-------------------------------------- 1 file changed, 3 insertions(+), 61 deletions(-) diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index a24c5df449e..77f0fe470c7 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -67,7 +67,7 @@ options: - Pool member state required: true default: present - choices: ['present', 'absent', 'enabled', 'disabled'] + choices: ['present', 'absent'] aliases: [] partition: description: @@ -78,7 +78,7 @@ options: aliases: [] name: description: - - "Node name. 
Required when state=enabled/disabled" + - "Node name" required: false default: null choices: [] @@ -145,11 +145,6 @@ EXAMPLES = ''' partition=matthite name="{{ ansible_default_ipv4["address"] }}" - - name: Disable node - bigip_node: server=lb.mydomain.com user=admin password=mysecret - state=disabled name=mynodename - delegate_to: localhost - ''' try: @@ -163,13 +158,6 @@ else: # bigip_node module specific # -# map of state values -STATES={'enabled': 'STATE_ENABLED', - 'disabled': 'STATE_DISABLED'} -STATUSES={'enabled': 'SESSION_STATUS_ENABLED', - 'disabled': 'SESSION_STATUS_DISABLED', - 'offline': 'SESSION_STATUS_FORCED_DISABLED'} - def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) return api @@ -232,25 +220,6 @@ def set_node_description(api, name, description): def get_node_description(api, name): return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0] -def set_node_disabled(api, name): - set_node_session_enabled_state(api, name, STATES['disabled']) - result = True - desc = "" - return (result, desc) - -def set_node_enabled(api, name): - set_node_session_enabled_state(api, name, STATES['enabled']) - result = True - desc = "" - return (result, desc) - -def set_node_session_enabled_state(api, name, state): - api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name], - states=[state]) - -def get_node_session_status(api, name): - return api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0] - def main(): module = AnsibleModule( argument_spec = dict( @@ -258,8 +227,7 @@ def main(): user = dict(type='str', required=True), password = dict(type='str', required=True), validate_certs = dict(default='yes', type='bool'), - state = dict(type='str', default='present', - choices=['present', 'absent', 'disabled', 'enabled']), + state = dict(type='str', default='present', choices=['present', 'absent']), partition = dict(type='str', default='Common'), name = dict(type='str', required=True), host = dict(type='str', aliases=['address', 'ip']), @@ -334,32 +302,6 @@ def main(): set_node_description(api, address, description) result = {'changed': True} - elif state in ('disabled', 'enabled'): - if name is None: - module.fail_json(msg="name parameter required when " \ - "state=enabled/disabled") - if not module.check_mode: - if not node_exists(api, name): - module.fail_json(msg="node does not exist") - status = get_node_session_status(api, name) - if state == 'disabled': - if status not in (STATUSES['disabled'], STATUSES['offline']): - disabled, desc = set_node_disabled(api, name) - if not disabled: - module.fail_json(msg="unable to disable: %s" % desc) - else: - result = {'changed': True} - else: - if status != STATUSES['enabled']: - enabled, desc = set_node_enabled(api, name) - if not enabled: - module.fail_json(msg="unable to enable: %s" % desc) - else: - result = {'changed': True} - else: - # check-mode return value - result = {'changed': True} - except Exception, e: module.fail_json(msg="received exception: %s" % e) From a4232d657d29e5bef53a89aa56895ffcb970f554 Mon Sep 17 00:00:00 2001 From: Willy Barro Date: Wed, 13 May 2015 20:45:50 -0300 Subject: [PATCH 312/720] Handle invalid api key and general api errors on pushbullet pushbullet.py module has changed it's API and return types so we're now handling these exceptions. 
--- notification/pushbullet.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/notification/pushbullet.py b/notification/pushbullet.py index 5b255b4b549..5e758507279 100644 --- a/notification/pushbullet.py +++ b/notification/pushbullet.py @@ -95,6 +95,7 @@ EXAMPLES = ''' try: from pushbullet import PushBullet + from pushbullet.errors import InvalidKeyError, PushError except ImportError: pushbullet_found = False else: @@ -131,8 +132,11 @@ def main(): module.fail_json(msg="Python 'pushbullet.py' module is required. Install via: $ pip install pushbullet.py") # Init pushbullet - pb = PushBullet(api_key) - target = None + try: + pb = PushBullet(api_key) + target = None + except InvalidKeyError: + module.fail_json(msg="Invalid api_key") # Checks for channel/device if device is None and channel is None: @@ -161,13 +165,13 @@ def main(): module.exit_json(changed=False, msg="OK") # Send push notification - success, result = target.push_note(title, body) - - if success: + try: + target.push_note(title, body) module.exit_json(changed=False, msg="OK") + except PushError as e: + module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e)) - # General failure - module.fail_json(msg="Some error ocurred, Pushbullet response: %s" % (result)) + module.fail_json(msg="An unknown error has occurred") # import module snippets from ansible.module_utils.basic import * From a2daad00f845e4803c0631b366276e5a5a4f4dad Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 13 May 2015 19:45:32 -0500 Subject: [PATCH 313/720] Ignore pushbullet.py from 2.4 compat checks --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index cf64440ae3c..7fda5b98133 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,5 +7,5 @@ addons: packages: - python2.4 script: - - python2.4 -m compileall -fq -x 'cloud/|monitoring/zabbix.*\.py|/layman.py|/maven_artifact.py|clustering/consul.*\.py' . + - python2.4 -m compileall -fq -x 'cloud/|monitoring/zabbix.*\.py|/layman\.py|/maven_artifact\.py|clustering/consul.*\.py|notification/pushbullet\.py' . - python -m compileall -fq . 
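Taken together, the pushbullet patches above (removing changed=True, switching to mutually_exclusive, and catching the library's exceptions) illustrate a general pattern for notification modules: declare option exclusivity in the argument_spec instead of checking by hand, mask credentials with no_log, report changed=False because nothing on the target host is modified, and translate library errors into fail_json. The following is only a minimal sketch of that pattern under those assumptions; it is not the actual notification/pushbullet.py, and send_notification is a hypothetical stand-in for the real API call.

#!/usr/bin/python
# Minimal sketch of the conventions applied in the pushbullet patches above.
# Hypothetical example module; send_notification stands in for a real client call.

def send_notification(params):
    # Stand-in for a real API call (e.g. a push_note); may raise on error.
    pass

def main():
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(type='str', required=True, no_log=True),
            device=dict(type='str', default=None),
            channel=dict(type='str', default=None),
            title=dict(type='str', required=True),
            body=dict(type='str', default=None),
        ),
        # Let AnsibleModule reject device+channel combinations instead of
        # checking them manually in main().
        mutually_exclusive=(
            ['channel', 'device'],
        ),
        supports_check_mode=True,
    )

    if module.check_mode:
        # Nothing is changed on the target; notifications report changed=False.
        module.exit_json(changed=False, msg="OK")

    try:
        send_notification(module.params)
        module.exit_json(changed=False, msg="OK")
    except Exception as e:
        # Translate library errors into a clean module failure.
        module.fail_json(msg="Notification failed: %s" % str(e))

# import module snippets
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()

Declaring the exclusivity and check-mode behaviour on AnsibleModule keeps the validation declarative and consistent across modules, which is the direction the three pushbullet patches move in.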
From 612ba118e090d2d98c622db161d5b6cad2873d11 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 21:58:23 -0400 Subject: [PATCH 314/720] Adding author's github id --- notification/campfire.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/campfire.py b/notification/campfire.py index 31e69fc5459..f302b65bc8d 100644 --- a/notification/campfire.py +++ b/notification/campfire.py @@ -43,7 +43,7 @@ options: # informational: requirements for nodes requirements: [ urllib2, cgi ] -author: Adam Garside +author: "Adam Garside (@fabulops)" ''' EXAMPLES = ''' From c3db70df10e10db1a311554153d969af18636175 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 22:01:52 -0400 Subject: [PATCH 315/720] Adding author's github id --- notification/flowdock.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/flowdock.py b/notification/flowdock.py index 009487fb438..41a23434012 100644 --- a/notification/flowdock.py +++ b/notification/flowdock.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: flowdock version_added: "1.2" -author: Matt Coddington +author: "Matt Coddington (@mcodd)" short_description: Send a message to a flowdock description: - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat) From 7cf6900008876336153833a0eb761193481f082b Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 22:03:04 -0400 Subject: [PATCH 316/720] Adding author's github id --- notification/grove.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/grove.py b/notification/grove.py index 8f4ec42be58..d705c000012 100644 --- a/notification/grove.py +++ b/notification/grove.py @@ -39,7 +39,7 @@ options: default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 -author: Jonas Pfenniger +author: "Jonas Pfenniger (@zimbatm)" ''' EXAMPLES = ''' From a5ea6a611bc7c6953d6352dbb6c86d68c739beb1 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 22:04:56 -0400 Subject: [PATCH 317/720] Adding author's github id --- notification/hipchat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/hipchat.py b/notification/hipchat.py index 060babf08d8..44b3dfb156f 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -64,7 +64,7 @@ options: # informational: requirements for nodes requirements: [ urllib, urllib2 ] -author: WAKAYAMA Shirou +author: "WAKAYAMA Shirou (@shirou)" ''' EXAMPLES = ''' From 51b1c843bb46aa24143cbc31a2f27a4d2b4bad96 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 22:08:00 -0400 Subject: [PATCH 318/720] Adding author's github id --- notification/irc.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/notification/irc.py b/notification/irc.py index a90834f820d..b55e3ec7c42 100644 --- a/notification/irc.py +++ b/notification/irc.py @@ -80,7 +80,9 @@ options: # informational: requirements for nodes requirements: [ socket ] -author: Jan-Piet Mens, Matt Martz +author: + - "Jan-Piet Mens (@jpmens)" + - "Matt Martz (@sivel)" ''' EXAMPLES = ''' From c5ec2e2f2439cbada70e6c1a2eb54976eb9355a1 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 22:09:07 -0400 Subject: [PATCH 319/720] Adding author's github id --- notification/jabber.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/jabber.py b/notification/jabber.py index 
8a7eed37b33..466c72d1570 100644 --- a/notification/jabber.py +++ b/notification/jabber.py @@ -42,7 +42,7 @@ options: # informational: requirements for nodes requirements: [ xmpp ] -author: Brian Coca +author: "Brian Coca (@bcoca)" ''' EXAMPLES = ''' From ee5363930f38a79c0eee7c1da907c422d1b31533 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 22:10:17 -0400 Subject: [PATCH 320/720] Adding author's github id --- notification/mail.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/mail.py b/notification/mail.py index 6dc3da533e6..89f1b36a0ad 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -20,7 +20,7 @@ DOCUMENTATION = """ --- -author: Dag Wieers +author: "Dag Wieers (@dagwieers)" module: mail short_description: Send an email description: From 7c51f6f891e4fe542173f34dd05a5800a8f0e205 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 22:11:22 -0400 Subject: [PATCH 321/720] Adding author's github id --- notification/mqtt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/mqtt.py b/notification/mqtt.py index d701bd9348a..c618ab69ae3 100644 --- a/notification/mqtt.py +++ b/notification/mqtt.py @@ -81,7 +81,7 @@ requirements: [ mosquitto ] notes: - This module requires a connection to an MQTT broker such as Mosquitto U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)). -author: Jan-Piet Mens +author: "Jan-Piet Mens (@jpmens)" ''' EXAMPLES = ''' From 9b051b8a96ed10c5e11ad37679e68ea524089722 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 22:27:43 -0400 Subject: [PATCH 322/720] Adding author's github id --- notification/nexmo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/nexmo.py b/notification/nexmo.py index d4898c40cdb..0726b65cf83 100644 --- a/notification/nexmo.py +++ b/notification/nexmo.py @@ -24,7 +24,7 @@ short_description: Send a SMS via nexmo description: - Send a SMS message via nexmo version_added: 1.6 -author: Matt Martz +author: "Matt Martz (@sivel)" options: api_key: description: From 79d7cdecd8455bd1e7a7290c7f944e63e1faf074 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 22:30:09 -0400 Subject: [PATCH 323/720] Adding author's github id --- notification/osx_say.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/notification/osx_say.py b/notification/osx_say.py index 39e3da88c19..7c0ba844583 100644 --- a/notification/osx_say.py +++ b/notification/osx_say.py @@ -37,7 +37,9 @@ options: What voice to use required: false requirements: [ say ] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan (@mpdehaan)" ''' EXAMPLES = ''' From 27e5950fd09e277bb993a6cf22a143f142f55d98 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 22:31:48 -0400 Subject: [PATCH 324/720] Adding author's github id --- notification/pushbullet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/pushbullet.py b/notification/pushbullet.py index 5e758507279..f7b7172d413 100644 --- a/notification/pushbullet.py +++ b/notification/pushbullet.py @@ -18,7 +18,7 @@ DOCUMENTATION = ''' --- -author: Willy Barro +author: "Willy Barro (@willybarro)" requirements: [ pushbullet.py ] module: pushbullet short_description: Sends notifications to Pushbullet From fd0c21df27da5c952ef71b69fadc1d74cb052eea Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Wed, 13 May 2015 21:20:05 -0500 
Subject: [PATCH 325/720] Updated lxc module to remove the clone state The clone state was removed in favor of making the module more declarative. This change was done in response to review in PR #328 from @bcoca. In the commit new examples were created on how this feature works. --- cloud/lxc/lxc_container.py | 221 ++++++++++++++++++++----------------- 1 file changed, 121 insertions(+), 100 deletions(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 74c55357501..409695a2e8d 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -156,12 +156,12 @@ options: - restarted - absent - frozen - - clone description: - - Define the state of a container. If you use clone the container - will be stopped while the clone operation is happening and upon - completion of the clone the original container state will be - restored. + - Define the state of a container. If you clone a container using + `clone_name` the newly cloned container created in a stopped state. + The running container will be stopped while the clone operation is + happening and upon completion of the clone the original container + state will be restored. required: false default: started container_config: @@ -225,6 +225,7 @@ EXAMPLES = """ - name: Create filesystem container lxc_container: name: test-container-config + backing_store: dir container_log: true template: ubuntu state: started @@ -238,7 +239,7 @@ EXAMPLES = """ # Create an lvm container, run a complex command in it, add additional # configuration to it, create an archive of it, and finally leave the container # in a frozen state. The container archive will be compressed using bzip2 -- name: Create an lvm container +- name: Create a frozen lvm container lxc_container: name: test-container-lvm container_log: true @@ -263,14 +264,6 @@ EXAMPLES = """ - name: Debug info on container "test-container-lvm" debug: var=lvm_container_info -- name: Get information on a given container. - lxc_container: - name: test-container-config - register: config_container_info - -- name: debug info on container "test-container" - debug: var=config_container_info - - name: Run a command in a container and ensure its in a "stopped" state. lxc_container: name: test-container-started @@ -285,19 +278,19 @@ EXAMPLES = """ container_command: | echo 'hello world.' | tee /opt/frozen -- name: Start a container. +- name: Start a container lxc_container: name: test-container-stopped state: started -- name: Run a command in a container and then restart it. +- name: Run a command in a container and then restart it lxc_container: name: test-container-started state: restarted container_command: | echo 'hello world.' | tee /opt/restarted -- name: Run a complex command within a "running" container. +- name: Run a complex command within a "running" container lxc_container: name: test-container-started container_command: | @@ -317,7 +310,10 @@ EXAMPLES = """ archive: true archive_path: /opt/archives -- name: Create an overlayfs container +# Create a container using overlayfs, create an archive of it, create a +# snapshot clone of the container and and finally leave the container +# in a frozen state. The container archive will be compressed using gzip. 
+- name: Create an overlayfs container archive and clone it lxc_container: name: test-container-overlayfs container_log: true @@ -325,40 +321,42 @@ EXAMPLES = """ state: started backing_store: overlayfs template_options: --release trusty + clone_snapshot: true + clone_name: test-container-overlayfs-clone-snapshot + archive: true + archive_compression: gzip + register: clone_container_info -- name: Clone a container - lxc_container: - name: test-container-overlayfs - clone_name: test-container-clone - state: clone +- name: debug info on container "test-container" + debug: var=clone_container_info -- name: Clone a container using snapshot. +- name: Clone a container using snapshot lxc_container: - name: test-container-overlayfs - clone_name: test-container-overlayfs-clone + name: test-container-overlayfs-clone-snapshot backing_store: overlayfs + clone_name: test-container-overlayfs-clone-snapshot2 clone_snapshot: true - state: clone - name: Create a new container and clone it lxc_container: - name: test-container-new-overlayfs - clone_name: test-container-new-overlayfs-clone - backing_store: overlayfs - clone_snapshot: true - state: clone + name: test-container-new-archive + backing_store: dir + clone_name: test-container-new-archive-clone -- name: Create a new container, clone it, and archive +- name: Archive and clone a container then destroy it lxc_container: - name: test-container-new-overlayfs - clone_name: test-container-new-overlayfs-clone - backing_store: overlayfs - clone_snapshot: true - state: clone + name: test-container-new-archive + state: absent + clone_name: test-container-new-archive-destroyed-clone archive: true archive_compression: gzip -- name: Destroy a container. +- name: Start a cloned container. + lxc_container: + name: test-container-new-archive-destroyed-clone + state: started + +- name: Destroy a container lxc_container: name: "{{ item }}" state: absent @@ -369,8 +367,12 @@ EXAMPLES = """ - test-container-lvm - test-container-config - test-container-overlayfs - - test-container-clone - test-container-overlayfs-clone + - test-container-overlayfs-clone-snapshot + - test-container-overlayfs-clone-snapshot2 + - test-container-new-archive + - test-container-new-archive-clone + - test-container-new-archive-destroyed-clone """ @@ -518,18 +520,16 @@ def create_script(command): f.close() # Ensure the script is executable. - os.chmod(script_file, 0755) + os.chmod(script_file, 1755) # Get temporary directory. tempdir = tempfile.gettempdir() # Output log file. - stdout = path.join(tempdir, 'lxc-attach-script.log') - stdout_file = open(stdout, 'ab') + stdout_file = open(path.join(tempdir, 'lxc-attach-script.log'), 'ab') # Error log file. - stderr = path.join(tempdir, 'lxc-attach-script.err') - stderr_file = open(stderr, 'ab') + stderr_file = open(path.join(tempdir, 'lxc-attach-script.err'), 'ab') # Execute the script command. try: @@ -561,6 +561,7 @@ class LxcContainerManagement(object): self.container_name = self.module.params['name'] self.container = self.get_container_bind() self.archive_info = None + self.clone_info = None def get_container_bind(self): return lxc.Container(name=self.container_name) @@ -735,7 +736,7 @@ class LxcContainerManagement(object): self._container_startup() self.container.freeze() - def _clone(self, count=0): + def _container_create_clone(self): """Clone a new LXC container from an existing container. This method will clone an existing container to a new container using @@ -751,66 +752,51 @@ class LxcContainerManagement(object): state. 
""" - self.check_count(count=count, method='clone') - if self._container_exists(container_name=self.container_name): - # Ensure that the state of the original container is stopped - container_state = self._get_state() - if container_state != 'stopped': - self.state_change = True - self.container.stop() + # Ensure that the state of the original container is stopped + container_state = self._get_state() + if container_state != 'stopped': + self.state_change = True + self.container.stop() - build_command = [ - self.module.get_bin_path('lxc-clone', True), - ] + build_command = [ + self.module.get_bin_path('lxc-clone', True), + ] - build_command = self._add_variables( - variables_dict=self._get_vars( - variables=LXC_COMMAND_MAP['clone']['variables'] - ), - build_command=build_command - ) + build_command = self._add_variables( + variables_dict=self._get_vars( + variables=LXC_COMMAND_MAP['clone']['variables'] + ), + build_command=build_command + ) - # Load logging for the instance when creating it. - if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE: - build_command.append('--snapshot') + # Load logging for the instance when creating it. + if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE: + build_command.append('--snapshot') + # Check for backing_store == overlayfs if so force the use of snapshot + # If overlay fs is used and snapshot is unset the clone command will + # fail with an unsupported type. + elif self.module.params.get('backing_store') == 'overlayfs': + build_command.append('--snapshot') - rc, return_data, err = self._run_command(build_command) - if rc != 0: - message = "Failed executing lxc-clone." - self.failure( - err=err, rc=rc, msg=message, command=' '.join( - build_command - ) + rc, return_data, err = self._run_command(build_command) + if rc != 0: + message = "Failed executing lxc-clone." + self.failure( + err=err, rc=rc, msg=message, command=' '.join( + build_command ) - else: - self.state_change = True - # Restore the original state of the origin container if it was - # not in a stopped state. - if container_state == 'running': - self.container.start() - elif container_state == 'frozen': - self.container.start() - self.container.freeze() - - # Change the container name context to the new cloned container - # This enforces that the state of the new cloned container will be - # "stopped". - self.state = 'stopped' - self.container_name = self.module.params['clone_name'] - self.container = self.get_container_bind() - - # Return data - self._execute_command() - - # Perform any configuration updates - self._config() - - # Check if the container needs to have an archive created. - self._check_archive() + ) else: - self._create() - count += 1 - self._clone(count) + self.state_change = True + # Restore the original state of the origin container if it was + # not in a stopped state. + if container_state == 'running': + self.container.start() + elif container_state == 'frozen': + self.container.start() + self.container.freeze() + + return True def _create(self): """Create a new LXC container. @@ -965,6 +951,23 @@ class LxcContainerManagement(object): 'archive': self._container_create_tar() } + def _check_clone(self): + """Create a compressed archive of a container. 
+ + This will store archive_info in as self.archive_info + """ + + clone_name = self.module.params.get('clone_name') + if clone_name: + if not self._container_exists(container_name=clone_name): + self.clone_info = { + 'cloned': self._container_create_clone() + } + else: + self.clone_info = { + 'cloned': False + } + def _destroyed(self, timeout=60): """Ensure a container is destroyed. @@ -979,6 +982,9 @@ class LxcContainerManagement(object): # Check if the container needs to have an archive created. self._check_archive() + # Check if the container is to be cloned + self._check_clone() + if self._get_state() != 'stopped': self.state_change = True self.container.stop() @@ -1028,6 +1034,9 @@ class LxcContainerManagement(object): # Check if the container needs to have an archive created. self._check_archive() + + # Check if the container is to be cloned + self._check_clone() else: self._create() count += 1 @@ -1055,6 +1064,9 @@ class LxcContainerManagement(object): # Check if the container needs to have an archive created. self._check_archive() + + # Check if the container is to be cloned + self._check_clone() else: self._create() count += 1 @@ -1082,6 +1094,9 @@ class LxcContainerManagement(object): # Check if the container needs to have an archive created. self._check_archive() + + # Check if the container is to be cloned + self._check_clone() else: self._create() count += 1 @@ -1122,6 +1137,9 @@ class LxcContainerManagement(object): # Check if the container needs to have an archive created. self._check_archive() + + # Check if the container is to be cloned + self._check_clone() else: self._create() count += 1 @@ -1581,6 +1599,9 @@ class LxcContainerManagement(object): if self.archive_info: outcome.update(self.archive_info) + if self.clone_info: + outcome.update(self.clone_info) + self.module.exit_json( changed=self.state_change, lxc_container=outcome From 43465e1af2051c4dddaaa7eeb9f9a24ef03b62fe Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 22:32:52 -0400 Subject: [PATCH 326/720] Adding author's github id --- notification/pushover.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/pushover.py b/notification/pushover.py index 3e710ca02dd..c4d1333e36c 100644 --- a/notification/pushover.py +++ b/notification/pushover.py @@ -48,7 +48,7 @@ options: description: Message priority (see u(https://pushover.net) for details.) 
required: false -author: Jim Richardson +author: "Jim Richardson (@weaselkeeper)" ''' EXAMPLES = ''' From aefba2c00a51662e4ede097567983f71c3463e72 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 22:35:13 -0400 Subject: [PATCH 327/720] Adding author's github id --- notification/sendgrid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/sendgrid.py b/notification/sendgrid.py index d8bfb7d6a2e..4893fea7fe3 100644 --- a/notification/sendgrid.py +++ b/notification/sendgrid.py @@ -53,7 +53,7 @@ options: the desired subject for the email required: true -author: Matt Makai +author: "Matt Makai (@makaimc)" ''' EXAMPLES = ''' From 5180a64745e12f39b3f2d64830a5109ea0539dd6 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 23:44:25 -0400 Subject: [PATCH 328/720] Adding author's github id --- notification/slack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/slack.py b/notification/slack.py index 19af9d9d093..61d01c7d443 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -24,7 +24,7 @@ short_description: Send Slack notifications description: - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration version_added: 1.6 -author: Ramon de la Fuente +author: "Ramon de la Fuente (@ramondelafuente)" options: domain: description: From 173f06ea18c82d18e75699465bb8c91618434eb7 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 23:45:01 -0400 Subject: [PATCH 329/720] Adding author's github id --- notification/sns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/sns.py b/notification/sns.py index 54421b0e9fa..16b02e66486 100644 --- a/notification/sns.py +++ b/notification/sns.py @@ -24,7 +24,7 @@ short_description: Send Amazon Simple Notification Service (SNS) messages description: - The M(sns) module sends notifications to a topic on your Amazon SNS account version_added: 1.6 -author: Michael J. Schultz +author: "Michael J. 
Schultz (@mjschultz)" options: msg: description: From 0d20c00314ea7da42dca671da029e16a5e058952 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 23:46:11 -0400 Subject: [PATCH 330/720] Adding author's github id --- notification/twilio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/twilio.py b/notification/twilio.py index faae7b6f58f..44366158ee1 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -58,7 +58,7 @@ options: (multimedia message) instead of a plain SMS required: false -author: Matt Makai +author: "Matt Makai (@makaimc)" ''' EXAMPLES = ''' From 380a1ddf04cd2ee06f7efe31f4880a1fc712a061 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 23:47:03 -0400 Subject: [PATCH 331/720] Adding author's github id --- notification/typetalk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/typetalk.py b/notification/typetalk.py index b987acbe837..1afd8d47045 100644 --- a/notification/typetalk.py +++ b/notification/typetalk.py @@ -26,7 +26,7 @@ options: - message body required: true requirements: [ urllib, urllib2, json ] -author: Takashi Someda +author: "Takashi Someda (@tksmd)" ''' EXAMPLES = ''' From 3c4b1afea91dc65a3c5e235dcd45f5d32aadc68f Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 23:48:08 -0400 Subject: [PATCH 332/720] Adding author's github id --- packaging/language/bower.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/bower.py b/packaging/language/bower.py index 085f454e639..3687be0c671 100644 --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -25,7 +25,7 @@ short_description: Manage bower packages with bower description: - Manage bower packages with bower version_added: 1.9 -author: Michael Warkentin +author: "Michael Warkentin (@mwarkentin)" options: name: description: From fc794ef0949427b17cdc340fd31b66fe91c6a3ec Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 23:48:56 -0400 Subject: [PATCH 333/720] Adding author's github id --- packaging/language/composer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/composer.py b/packaging/language/composer.py index a24b826a4de..b6390bce4d9 100644 --- a/packaging/language/composer.py +++ b/packaging/language/composer.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: composer -author: Dimitrios Tydeas Mengidis +author: "Dimitrios Tydeas Mengidis (@dmtrs)" short_description: Dependency Manager for PHP version_added: "1.6" description: From 12ab9d0043fa8e35846d61517a85609c0233afdb Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 23:49:40 -0400 Subject: [PATCH 334/720] Adding author's github id --- packaging/language/cpanm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/cpanm.py b/packaging/language/cpanm.py index ec344b7aa9b..5d36ec30844 100644 --- a/packaging/language/cpanm.py +++ b/packaging/language/cpanm.py @@ -73,7 +73,7 @@ examples: description: Install I(Dancer) perl package from a specific mirror notes: - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. 
-author: Franck Cuny +author: "Franck Cuny (@franckcuny)" ''' def _is_package_installed(module, name, locallib, cpanm): From 66008490d4c128117a3df5fa7c325801fca60bee Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 23:50:23 -0400 Subject: [PATCH 335/720] Adding author's github id --- packaging/language/maven_artifact.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/maven_artifact.py b/packaging/language/maven_artifact.py index f5d8f185a55..81c120b90e6 100644 --- a/packaging/language/maven_artifact.py +++ b/packaging/language/maven_artifact.py @@ -37,7 +37,7 @@ description: - Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve - snapshots or release versions of the artifact and will resolve the latest available version if one is not - available. -author: Chris Schmidt +author: "Chris Schmidt (@chrisisbeef)" requirements: - "python >= 2.6" - lxml From 75574d7713a5fcbd8c4b200bc345e2a10c89eaf3 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 13 May 2015 23:51:00 -0400 Subject: [PATCH 336/720] Adding author's github id --- packaging/language/npm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/npm.py b/packaging/language/npm.py index 8407589116a..d632eae1719 100644 --- a/packaging/language/npm.py +++ b/packaging/language/npm.py @@ -25,7 +25,7 @@ short_description: Manage node.js packages with npm description: - Manage node.js packages with Node Package Manager (npm) version_added: 1.2 -author: Chris Hoffman +author: "Chris Hoffman (@chrishoffman)" options: name: description: From 8725fe7273907887859a6fc40da5a64c8cc35ecf Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:07:56 -0400 Subject: [PATCH 337/720] Adding author's github id --- packaging/os/dnf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/dnf.py b/packaging/os/dnf.py index 222fe4fa222..305b79067d3 100644 --- a/packaging/os/dnf.py +++ b/packaging/os/dnf.py @@ -93,7 +93,7 @@ options: notes: [] # informational: requirements for nodes requirements: [ dnf ] -author: Cristian van Ee +author: "Cristian van Ee (@DJMuggs)" ''' EXAMPLES = ''' From e58197efb6d19f23c07019e1100ae522f6832ae3 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:10:22 -0400 Subject: [PATCH 338/720] Adding author's github id --- packaging/os/homebrew.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index aac4efd827e..dd25732fa14 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -22,7 +22,9 @@ DOCUMENTATION = ''' --- module: homebrew -author: Andrew Dunham and Daniel Jaouen +author: + - "Daniel Jaouen (@danieljaouen)" + - "Andrew Dunham" (@andrew-d)" short_description: Package manager for Homebrew description: - Manages Homebrew packages From 21c06dcae3ef1fc1ccc942b590b50443143660f3 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:10:57 -0400 Subject: [PATCH 339/720] Typo in author --- packaging/os/homebrew.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index dd25732fa14..609682207d8 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -24,7 +24,7 @@ DOCUMENTATION = ''' module: homebrew author: - "Daniel Jaouen (@danieljaouen)" - - "Andrew Dunham" (@andrew-d)" + - "Andrew Dunham (@andrew-d)" 
short_description: Package manager for Homebrew description: - Manages Homebrew packages From 1c92b4f8cc3e0b73c5a80c5b5bfde6cf263e2503 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:11:30 -0400 Subject: [PATCH 340/720] Adding author's github id --- packaging/os/homebrew_cask.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/homebrew_cask.py b/packaging/os/homebrew_cask.py index 75acead517b..c4d74b5d133 100644 --- a/packaging/os/homebrew_cask.py +++ b/packaging/os/homebrew_cask.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: homebrew_cask -author: Daniel Jaouen +author: "Daniel Jaouen (@danieljaouen)" short_description: Install/uninstall homebrew casks. description: - Manages Homebrew casks. From 62888d291d6c321628f8c776684e7d37e0505d88 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:11:50 -0400 Subject: [PATCH 341/720] Adding author's github id --- packaging/os/homebrew_cask.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/homebrew_cask.py b/packaging/os/homebrew_cask.py index c4d74b5d133..9e20149300f 100644 --- a/packaging/os/homebrew_cask.py +++ b/packaging/os/homebrew_cask.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: homebrew_cask -author: "Daniel Jaouen (@danieljaouen)" +author: "Daniel Jaouen (@danieljaouen)" short_description: Install/uninstall homebrew casks. description: - Manages Homebrew casks. From 63513b5ebab06dd11cc06054415bde4ed521937f Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:12:48 -0400 Subject: [PATCH 342/720] Adding author's github id --- packaging/os/homebrew_tap.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/homebrew_tap.py b/packaging/os/homebrew_tap.py index d329227b980..6402aa14ac1 100644 --- a/packaging/os/homebrew_tap.py +++ b/packaging/os/homebrew_tap.py @@ -24,7 +24,7 @@ import re DOCUMENTATION = ''' --- module: homebrew_tap -author: Daniel Jaouen +author: "Daniel Jaouen (@danieljaouen)" short_description: Tap a Homebrew repository. description: - Tap external Homebrew repositories. 
From 33e1d3bd0b7a0afc1e1ac30070976a60ed176278 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:13:31 -0400 Subject: [PATCH 343/720] Adding author's github id --- packaging/os/layman.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/layman.py b/packaging/os/layman.py index b4830f8ec2c..b4155d13b68 100644 --- a/packaging/os/layman.py +++ b/packaging/os/layman.py @@ -25,7 +25,7 @@ from urllib2 import Request, urlopen, URLError DOCUMENTATION = ''' --- module: layman -author: Jakub Jirutka +author: "Jakub Jirutka (@jirutka)" version_added: "1.6" short_description: Manage Gentoo overlays description: From 265edb49926cb3ab4c2a618c013bba8639b19aaa Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:15:38 -0400 Subject: [PATCH 344/720] Adding author's github id --- packaging/os/macports.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/macports.py b/packaging/os/macports.py index ae7010b1cbd..ca3a0f97426 100644 --- a/packaging/os/macports.py +++ b/packaging/os/macports.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: macports -author: Jimmy Tang +author: "Jimmy Tang (@jcftang)" short_description: Package manager for MacPorts description: - Manages MacPorts packages From c03c80be88e16bd13371681c9ac8e61899a0d483 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:17:09 -0400 Subject: [PATCH 345/720] Adding author's github id --- packaging/os/openbsd_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/openbsd_pkg.py b/packaging/os/openbsd_pkg.py index 14b4ff46024..10bfbdc8f96 100644 --- a/packaging/os/openbsd_pkg.py +++ b/packaging/os/openbsd_pkg.py @@ -25,7 +25,7 @@ import syslog DOCUMENTATION = ''' --- module: openbsd_pkg -author: Patrik Lundin +author: "Patrik Lundin (@eest)" version_added: "1.1" short_description: Manage packages on OpenBSD. description: From 384c1523c3b1c8146b6af95a223299ca4f846658 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:18:41 -0400 Subject: [PATCH 346/720] Adding author's github id --- packaging/os/opkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/opkg.py b/packaging/os/opkg.py index 95afd6fd8bd..7f6c57a659c 100644 --- a/packaging/os/opkg.py +++ b/packaging/os/opkg.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: opkg -author: Patrick Pelletier +author: "Patrick Pelletier (@skinp)" short_description: Package manager for OpenWrt description: - Manages OpenWrt packages From 72ee99184aa53bf3241d4504876c0863b004421e Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:21:51 -0400 Subject: [PATCH 347/720] Adding author's github id --- packaging/os/pacman.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packaging/os/pacman.py b/packaging/os/pacman.py index a91f8e3054d..8aefbef8b0f 100644 --- a/packaging/os/pacman.py +++ b/packaging/os/pacman.py @@ -27,7 +27,9 @@ description: - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. 
version_added: "1.0" -author: Afterburn +author: + - "Aaron Bull Schaefer (@elasticdog)" + - "Afterburn" notes: [] requirements: [] options: From 2004ae64ab961b7c697e0f217a50f901dcc08436 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:23:29 -0400 Subject: [PATCH 348/720] Adding author's github id --- packaging/os/pkg5.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index 6adbff7f331..be0e24214ea 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: pkg5 -author: Peter Oliver +author: "Peter Oliver (@mavit)" short_description: Manages packages with the Solaris 11 Image Packaging System version_added: 1.9 description: From 335ba5bf32f9d4c3f20016e3081659b140a6e148 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:23:59 -0400 Subject: [PATCH 349/720] Adding author's github id --- packaging/os/pkg5_publisher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/pkg5_publisher.py b/packaging/os/pkg5_publisher.py index 53d7ad821f2..43d00ce4b25 100644 --- a/packaging/os/pkg5_publisher.py +++ b/packaging/os/pkg5_publisher.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: pkg5_publisher -author: Peter Oliver +author: "Peter Oliver (@mavit)" short_description: Manages Solaris 11 Image Packaging System publishers version_added: 1.9 description: From 33bdc7ce77bb1ce003168a0e4a786bccb03630e6 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:26:02 -0400 Subject: [PATCH 350/720] Adding author's github id --- packaging/os/pkgin.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packaging/os/pkgin.py b/packaging/os/pkgin.py index 9f25094210c..e1a973c2d30 100644 --- a/packaging/os/pkgin.py +++ b/packaging/os/pkgin.py @@ -30,7 +30,9 @@ description: - "The standard package manager for SmartOS, but also usable on NetBSD or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))" version_added: "1.0" -author: Shaun Zinck, Larry Gilbert +author: + - "Larry Gilbert (L2G)" + - "Shaun Zinck (@szinck)" notes: - "Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package will be silently removed as From 9501714b8b42d5504fdc1202105e13258eb501fc Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:27:06 -0400 Subject: [PATCH 351/720] Adding author's github id --- packaging/os/pkgng.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/pkgng.py b/packaging/os/pkgng.py index c54affbee22..91ee0743e10 100644 --- a/packaging/os/pkgng.py +++ b/packaging/os/pkgng.py @@ -63,7 +63,7 @@ options: for newer pkgng versions, specify a the name of a repository configured in /usr/local/etc/pkg/repos required: false -author: bleader +author: "bleader (@bleader)" notes: - When using pkgsite, be careful that already in cache packages won't be downloaded again. 
''' From c18f4e55fc84d8784d5363f525cd834d9cb06321 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:30:02 -0400 Subject: [PATCH 352/720] Adding author's github id --- packaging/os/pkgutil.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/pkgutil.py b/packaging/os/pkgutil.py index 635617b4efe..a735b6d9ed0 100644 --- a/packaging/os/pkgutil.py +++ b/packaging/os/pkgutil.py @@ -32,7 +32,7 @@ description: - Pkgutil is an advanced packaging system, which resolves dependency on installation. It is designed for CSW packages. version_added: "1.3" -author: Alexander Winkler +author: "Alexander Winkler (@dermute)" options: name: description: From 922fc32d3d5818319c2cf42b92306849bea34e8d Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:31:16 -0400 Subject: [PATCH 353/720] Adding author's github id --- packaging/os/portage.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index 124008c6522..2ce0379a8ec 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -147,7 +147,9 @@ options: choices: [ "yes" ] requirements: [ gentoolkit ] -author: Yap Sok Ann, Andrew Udvare +author: + - "Yap Sok Ann (@sayap)" + - "Andrew Udvare" notes: [] ''' From 9bb0a010600393fd68634958da27a805016a9058 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:33:04 -0400 Subject: [PATCH 354/720] Adding author's github id --- packaging/os/portinstall.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/portinstall.py b/packaging/os/portinstall.py index 068f413af72..b4e3044167e 100644 --- a/packaging/os/portinstall.py +++ b/packaging/os/portinstall.py @@ -43,7 +43,7 @@ options: choices: [ 'yes', 'no' ] required: false default: yes -author: berenddeboer +author: "berenddeboer (@berenddeboer)" ''' EXAMPLES = ''' From 324476428309746fcaaf21d1fd289ebfe4ee2ee2 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:33:21 -0400 Subject: [PATCH 355/720] Adding author's github id --- packaging/os/portinstall.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/portinstall.py b/packaging/os/portinstall.py index b4e3044167e..3c9d75767fd 100644 --- a/packaging/os/portinstall.py +++ b/packaging/os/portinstall.py @@ -43,7 +43,7 @@ options: choices: [ 'yes', 'no' ] required: false default: yes -author: "berenddeboer (@berenddeboer)" +author: "berenddeboer (@berenddeboer)" ''' EXAMPLES = ''' From 3d55887aa56b6305d23551fd68e9e2caee437dba Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:35:16 -0400 Subject: [PATCH 356/720] Adding author's github id --- packaging/os/svr4pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/svr4pkg.py b/packaging/os/svr4pkg.py index e95d4d8643f..51cda437e7f 100644 --- a/packaging/os/svr4pkg.py +++ b/packaging/os/svr4pkg.py @@ -30,7 +30,7 @@ description: - Note that this is a very basic packaging system. It will not enforce dependencies on install or remove. 
version_added: "0.9" -author: Boyd Adamson +author: "Boyd Adamson (@brontitall)" options: name: description: From 773bae9534dc1db87690549511d409c8cd211b43 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:36:02 -0400 Subject: [PATCH 357/720] Adding author's github id --- packaging/os/swdepot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/swdepot.py b/packaging/os/swdepot.py index b41a860531f..74c886ca75d 100644 --- a/packaging/os/swdepot.py +++ b/packaging/os/swdepot.py @@ -29,7 +29,7 @@ description: - Will install, upgrade and remove packages with swdepot package manager (HP-UX) version_added: "1.4" notes: [] -author: Raul Melo +author: "Raul Melo (@melodous)" options: name: description: From 73f9e5a1b9f462754c366f72f7186a974debf975 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:37:52 -0400 Subject: [PATCH 358/720] Adding author's github id --- packaging/os/urpmi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/urpmi.py b/packaging/os/urpmi.py index 320d17bfc00..63fe6c7f8ef 100644 --- a/packaging/os/urpmi.py +++ b/packaging/os/urpmi.py @@ -57,7 +57,7 @@ options: required: false default: yes choices: [ "yes", "no" ] -author: Philippe Makowski +author: "Philippe Makowski (@pmakowski)" notes: [] ''' From a92f5b657a6a44ede113e769dc448a7f868e38e4 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:39:02 -0400 Subject: [PATCH 359/720] Adding author's github id --- packaging/os/zypper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index ccf901d4fa1..a1b2a391e99 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -31,7 +31,7 @@ import re DOCUMENTATION = ''' --- module: zypper -author: Patrick Callahan +author: "Patrick Callahan (@dirtyharrycallahan)" version_added: "1.2" short_description: Manage packages on SUSE and openSUSE description: From 16ce411f66cc6c56107d519c16a248ac2fe5d970 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:39:40 -0400 Subject: [PATCH 360/720] Adding author's github id --- packaging/os/zypper_repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/zypper_repository.py b/packaging/os/zypper_repository.py index f208305fe60..4853bdaff07 100644 --- a/packaging/os/zypper_repository.py +++ b/packaging/os/zypper_repository.py @@ -23,7 +23,7 @@ DOCUMENTATION = ''' --- module: zypper_repository -author: Matthias Vogelgesang +author: "Matthias Vogelgesang (@matze)" version_added: "1.4" short_description: Add and remove Zypper repositories description: From adf7496d1984148b1aee4f07c0fe471a86008c8b Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:40:46 -0400 Subject: [PATCH 361/720] Adding author's github id --- source_control/bzr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/bzr.py b/source_control/bzr.py index 0d25a026f7a..bf2c873fb81 100644 --- a/source_control/bzr.py +++ b/source_control/bzr.py @@ -22,7 +22,7 @@ DOCUMENTATION = u''' --- module: bzr -author: André Paramés +author: "André Paramés (@andreparames)" version_added: "1.1" short_description: Deploy software (or files) from bzr branches description: From 1170d8029cfac7ae02953389aeba5977704026b2 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:41:41 -0400 Subject: [PATCH 362/720] Adding author's github id --- source_control/github_hooks.py | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/github_hooks.py b/source_control/github_hooks.py index 7aaff98f413..9184a7b8229 100644 --- a/source_control/github_hooks.py +++ b/source_control/github_hooks.py @@ -64,7 +64,7 @@ options: default: 'json' choices: ['json', 'form'] -author: Phillip Gentry, CX Inc +author: "Phillip Gentry, CX Inc (@pcgentry)" ''' EXAMPLES = ''' From 34d93e048816eac3ef106c4010f910206dc66b01 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:43:58 -0400 Subject: [PATCH 363/720] Adding author's github id --- system/alternatives.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/system/alternatives.py b/system/alternatives.py index 62669db9b14..c82eac951ad 100755 --- a/system/alternatives.py +++ b/system/alternatives.py @@ -30,6 +30,9 @@ description: - Manages symbolic links using the 'update-alternatives' tool - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). version_added: "1.6" +author: + - "David Wittman (@DavidWittman)" + - "Gabe Mulley (@mulby)" options: name: description: From 4399d9feee996765d8c98ceba42a07b8c09bbcb9 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:45:08 -0400 Subject: [PATCH 364/720] Adding author's github id --- system/at.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/at.py b/system/at.py index 770148991f1..fb5fbdf6900 100644 --- a/system/at.py +++ b/system/at.py @@ -59,7 +59,7 @@ options: default: false requirements: - at -author: Richard Isaacson +author: "Richard Isaacson (@risaacson)" ''' EXAMPLES = ''' From 7b9d8baf76995df1a23ce501ff0d42706c0f0e6a Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:46:37 -0400 Subject: [PATCH 365/720] Adding author's github id --- system/capabilities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/capabilities.py b/system/capabilities.py index f4a9f62c0d0..c20cd3a9fff 100644 --- a/system/capabilities.py +++ b/system/capabilities.py @@ -50,7 +50,7 @@ notes: and flags to compare, so you will want to ensure that your capabilities argument matches the final capabilities. 
requirements: [] -author: Nate Coraor +author: "Nate Coraor (@natefoo)" ''' EXAMPLES = ''' From f65bb1c55e68cd62c5309321b74a93281b69f2b5 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:47:26 -0400 Subject: [PATCH 366/720] Adding author's github id --- system/cronvar.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/cronvar.py b/system/cronvar.py index 23a626472c3..fe337752d59 100755 --- a/system/cronvar.py +++ b/system/cronvar.py @@ -81,7 +81,7 @@ options: default: false requirements: - cron -author: Doug Luce +author: "Doug Luce (@dougluce)" """ EXAMPLES = ''' From 57ad76208d95185dc4f3f213561faa9c5292e700 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:48:29 -0400 Subject: [PATCH 367/720] Adding author's github id --- system/crypttab.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/crypttab.py b/system/crypttab.py index 52f3e75576a..8f2f563bdfd 100644 --- a/system/crypttab.py +++ b/system/crypttab.py @@ -69,7 +69,7 @@ options: notes: [] requirements: [] -author: Steve +author: "Steve (@groks)" ''' EXAMPLES = ''' From d1999aa7cf1908e38781463a588f2a355d239fd7 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:49:57 -0400 Subject: [PATCH 368/720] Adding author's github id --- system/debconf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/debconf.py b/system/debconf.py index 0deaff25eb1..b249986a947 100644 --- a/system/debconf.py +++ b/system/debconf.py @@ -68,7 +68,7 @@ options: required: false default: False aliases: [] -author: Brian Coca +author: "Brian Coca (@bcoca)" ''' From c10896c401348015a09e029b99d96a326de11d5e Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:50:42 -0400 Subject: [PATCH 369/720] Adding author's github id --- system/facter.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/system/facter.py b/system/facter.py index a4912835447..6c09877fcbe 100644 --- a/system/facter.py +++ b/system/facter.py @@ -32,7 +32,9 @@ version_added: "0.2" options: {} notes: [] requirements: [ "facter", "ruby-json" ] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = ''' From b08c630fe9364d21412b34057a29659afc3c17fa Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:58:26 -0400 Subject: [PATCH 370/720] Adding author's github id --- system/filesystem.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/filesystem.py b/system/filesystem.py index 0de5b75e38b..3711a27024c 100644 --- a/system/filesystem.py +++ b/system/filesystem.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -author: Alexander Bulimov +author: "Alexander Bulimov (@abulimov)" module: filesystem short_description: Makes file system on block device description: From e1385a9e8ba9e2a9ad96c7d18d3f138b20698f12 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 00:59:06 -0400 Subject: [PATCH 371/720] Adding author's github id --- system/firewalld.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/firewalld.py b/system/firewalld.py index 07cb8d224e7..efdd9611613 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -69,7 +69,7 @@ options: notes: - Not tested on any debian based system. 
requirements: [ firewalld >= 0.2.11 ] -author: Adam Miller +author: "Adam Miller (@maxamillion)" ''' EXAMPLES = ''' From cff5b5b5cccb50abead2b3d1095d084ae4802378 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:00:38 -0400 Subject: [PATCH 372/720] Adding author's github id --- system/getent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/getent.py b/system/getent.py index bb6d162398c..7df9e1d795f 100644 --- a/system/getent.py +++ b/system/getent.py @@ -54,7 +54,7 @@ options: notes: - "Not all databases support enumeration, check system documentation for details" requirements: [ ] -author: Brian Coca +author: "Brian Coca (@bcoca)" ''' EXAMPLES = ''' From f59b405e7aa94f8fc529414361ef59d43102e89b Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:02:50 -0400 Subject: [PATCH 373/720] Adding author's github id --- system/gluster_volume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 04a75d93ce0..cb554b74e1c 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -103,7 +103,7 @@ options: notes: - "Requires cli tools for GlusterFS on servers" - "Will add new bricks, but not remove them" -author: Taneli Leppä +author: "Taneli Leppä (@rosmo)" """ EXAMPLES = """ From 7f8e55a66a3589b72d71e9adaa6b17e30004fcd2 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:03:29 -0400 Subject: [PATCH 374/720] Adding author's github id --- system/kernel_blacklist.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/kernel_blacklist.py b/system/kernel_blacklist.py index 6af08c0788c..e1e8c8fcb4a 100644 --- a/system/kernel_blacklist.py +++ b/system/kernel_blacklist.py @@ -25,7 +25,7 @@ import re DOCUMENTATION = ''' --- module: kernel_blacklist -author: Matthias Vogelgesang +author: "Matthias Vogelgesang (@matze)" version_added: 1.4 short_description: Blacklist kernel modules description: From 3d4477ab0706ece410cd4d95fbbf8f0591ce6969 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:04:45 -0400 Subject: [PATCH 375/720] Adding author's github id --- system/known_hosts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/known_hosts.py b/system/known_hosts.py index 86876cd4931..2af84c07d96 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -51,7 +51,7 @@ options: required: no default: present requirements: [ ] -author: Matthew Vernon +author: "Matthew Vernon (@mcv21)" ''' EXAMPLES = ''' From 45478760e37a4877f78e25c64a0f5c9e6c04bd7b Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:06:16 -0400 Subject: [PATCH 376/720] Adding author's github id --- system/locale_gen.py | 1 + 1 file changed, 1 insertion(+) diff --git a/system/locale_gen.py b/system/locale_gen.py index c4b2af7dc1b..9108cfb53cd 100644 --- a/system/locale_gen.py +++ b/system/locale_gen.py @@ -13,6 +13,7 @@ short_description: Creates or removes locales. description: - Manages locales by editing /etc/locale.gen and invoking locale-gen. 
version_added: "1.6" +author: "Augustus Kling (@AugustusKling)" options: name: description: From c90c3e77da4362ef9f8eb4613b80c3c8c8c5dcd2 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:06:57 -0400 Subject: [PATCH 377/720] Adding author's github id --- system/lvg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/lvg.py b/system/lvg.py index 295ee24e3c6..3b3b8b5a7c7 100644 --- a/system/lvg.py +++ b/system/lvg.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- -author: Alexander Bulimov +author: "Alexander Bulimov (@abulimov)" module: lvg short_description: Configure LVM volume groups description: From 2fdebb296acfcdf88c04e10a04b8624dc4327400 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:08:41 -0400 Subject: [PATCH 378/720] Adding author's github id --- system/lvol.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index d9be9e7dc70..7fa483d318a 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -20,7 +20,9 @@ DOCUMENTATION = ''' --- -author: Jeroen Hoekx +author: + - "Jeroen Hoekx (@jhoekx)" + - "Alexander Bulimov (@abulimov)" module: lvol short_description: Configure LVM logical volumes description: From 88bebd353dcccd50177a7c8627216d8d4518ac7a Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:09:41 -0400 Subject: [PATCH 379/720] Adding author's github id --- system/modprobe.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/system/modprobe.py b/system/modprobe.py index af845ae8cf5..f3b22209dd9 100644 --- a/system/modprobe.py +++ b/system/modprobe.py @@ -25,7 +25,10 @@ module: modprobe short_description: Add or remove kernel modules requirements: [] version_added: 1.4 -author: David Stygstra, Julien Dauphant, Matt Jeffery +author: + - "David Stygstra (@stygstra)" + - Julien Dauphant + - Matt Jeffery description: - Add or remove kernel modules. 
options: From 2002a235db0b6a1a91777a834c8b038cc97e48d7 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:10:43 -0400 Subject: [PATCH 380/720] Adding author's github id --- system/ohai.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/system/ohai.py b/system/ohai.py index b50abc9db03..6f066ec5ad8 100644 --- a/system/ohai.py +++ b/system/ohai.py @@ -32,7 +32,9 @@ version_added: "0.6" options: {} notes: [] requirements: [ "ohai" ] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan (@mpdehaan)" ''' EXAMPLES = ''' From 5e327de35373c55653f4c82a9ee943b0c2c9fa53 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:12:07 -0400 Subject: [PATCH 381/720] Adding author's github id --- system/open_iscsi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/open_iscsi.py b/system/open_iscsi.py index c661a723d77..aa9271bc259 100644 --- a/system/open_iscsi.py +++ b/system/open_iscsi.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: open_iscsi -author: Serge van Ginderachter +author: "Serge van Ginderachter (@srvg)" version_added: "1.4" short_description: Manage iscsi targets with open-iscsi description: From 2804ffd9253710c6c7c9bc8bfeedf5ba8901bc94 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:12:33 -0400 Subject: [PATCH 382/720] Adding author's github id --- system/svc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/svc.py b/system/svc.py index 04749cfc134..0227a69ecd8 100755 --- a/system/svc.py +++ b/system/svc.py @@ -4,7 +4,7 @@ DOCUMENTATION = ''' --- module: svc -author: Brian Coca +author: "Brian Coca (@bcoca)" version_added: short_description: Manage daemontools services. description: From 1c662556e91e6eaddc402e9ed4e6a6fe78c496cc Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:16:42 -0400 Subject: [PATCH 383/720] Adding author's github id --- system/ufw.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/system/ufw.py b/system/ufw.py index a49aa8c3a49..f9c1f3b57e9 100644 --- a/system/ufw.py +++ b/system/ufw.py @@ -28,7 +28,10 @@ short_description: Manage firewall with UFW description: - Manage firewall with UFW. version_added: 1.6 -author: Aleksey Ovcharenko, Jarno Keskikangas, Ahti Kitsik +author: + - "Aleksey Ovcharenko (@ovcharenko)" + - "Jarno Keskikangas (@pyykkis)" + - "Ahti Kitsik (@ahtik)" notes: - See C(man ufw) for more examples. requirements: From b232f143e49efe43c9f7bf390067ffb0704926fc Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:19:04 -0400 Subject: [PATCH 384/720] Adding author's github id --- system/zfs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/zfs.py b/system/zfs.py index 93248897051..7a5cc205225 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -206,7 +206,7 @@ options: - The zoned property. 
required: False choices: ['on','off'] -author: Johan Wiren +author: "Johan Wiren (@johanwiren)" ''' EXAMPLES = ''' From 58cc8f8240578fefd51743a769eea6201b3ad701 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:20:25 -0400 Subject: [PATCH 385/720] Adding author's github id --- web_infrastructure/ejabberd_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_infrastructure/ejabberd_user.py b/web_infrastructure/ejabberd_user.py index bc54351e657..2031cd61b41 100755 --- a/web_infrastructure/ejabberd_user.py +++ b/web_infrastructure/ejabberd_user.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: ejabberd_user version_added: "1.5" -author: Peter Sprygada +author: "Peter Sprygada (@privateip)" short_description: Manages users for ejabberd servers requirements: - ejabberd with mod_admin_extra From c76f37e53bd9e6c870a860e7be6f9350e7622e98 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:21:46 -0400 Subject: [PATCH 386/720] Adding author's github id --- web_infrastructure/jboss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_infrastructure/jboss.py b/web_infrastructure/jboss.py index 65b44d23047..781c60b00cd 100644 --- a/web_infrastructure/jboss.py +++ b/web_infrastructure/jboss.py @@ -47,7 +47,7 @@ options: notes: - "The JBoss standalone deployment-scanner has to be enabled in standalone.xml" - "Ensure no identically named application is deployed through the JBoss CLI" -author: Jeroen Hoekx +author: "Jeroen Hoekx (@jhoekx)" """ EXAMPLES = """ From e687c6f115e52c246a2240000304902160b5f521 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:25:28 -0400 Subject: [PATCH 387/720] Adding author's github id --- web_infrastructure/jira.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_infrastructure/jira.py b/web_infrastructure/jira.py index 437a21b6769..b661185a316 100644 --- a/web_infrastructure/jira.py +++ b/web_infrastructure/jira.py @@ -99,7 +99,7 @@ options: notes: - "Currently this only works with basic-auth." 
-author: Steve Smith +author: "Steve Smith (@tarka)" """ EXAMPLES = """ From 9a1e35e1db14fca0ffe940dd511bcf30fac7ee2d Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:29:21 -0400 Subject: [PATCH 388/720] Adding author's github id --- windows/win_chocolatey.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/windows/win_chocolatey.py b/windows/win_chocolatey.py index 4df1f3c58e8..c46d212cc30 100644 --- a/windows/win_chocolatey.py +++ b/windows/win_chocolatey.py @@ -86,7 +86,9 @@ options: require: false default: c:\\ansible-playbook.log aliases: [] -author: Trond Hindenes, Peter Mounce +author: + - "Trond Hindenes (@trondhindenes)" + - "Peter Mounce (@petemounce)" ''' # TODO: From 8ec8cd182afef6729e6776a04694f433f6ec3f71 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:30:01 -0400 Subject: [PATCH 389/720] Adding author's github id --- windows/win_chocolatey.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_chocolatey.py b/windows/win_chocolatey.py index c46d212cc30..ad0389aa398 100644 --- a/windows/win_chocolatey.py +++ b/windows/win_chocolatey.py @@ -88,7 +88,7 @@ options: aliases: [] author: - "Trond Hindenes (@trondhindenes)" - - "Peter Mounce (@petemounce)" + - "Peter Mounce (@petemounce)" ''' # TODO: From efd307903c39497d8ba11330eba1fe74f36bc1d5 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 14 May 2015 01:30:45 -0400 Subject: [PATCH 390/720] Adding author's github id --- windows/win_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_updates.py b/windows/win_updates.py index 7eefd8ba331..49d2eac3a2e 100644 --- a/windows/win_updates.py +++ b/windows/win_updates.py @@ -41,7 +41,7 @@ options: - (anything that is a valid update category) default: critical aliases: [] -author: Peter Mounce +author: "Peter Mounce (@petemounce)" ''' EXAMPLES = ''' From 67ccbd3bc0fb18449a8abd3e500eec5d6f75ab1e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 13 May 2015 14:06:43 -0400 Subject: [PATCH 391/720] more minor docfixes --- cloud/lxc/lxc_container.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 409695a2e8d..a06cd09583b 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -114,11 +114,15 @@ options: required: false default: INFO clone_name: + version_added: "2.0" description: - Name of the new cloned server. This is only used when state is clone. 
required: false + default: false clone_snapshot: + version_added: "2.0" + required: false choices: - true - false From bceeba224f8ee25323de26e120eaa68fdb69a74f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 14 May 2015 09:19:04 -0400 Subject: [PATCH 392/720] fix doc parsing by correctly quoting author --- cloud/cloudstack/cs_account.py | 2 +- cloud/cloudstack/cs_affinitygroup.py | 2 +- cloud/cloudstack/cs_firewall.py | 2 +- cloud/cloudstack/cs_instance.py | 2 +- cloud/cloudstack/cs_instancegroup.py | 2 +- cloud/cloudstack/cs_iso.py | 2 +- cloud/cloudstack/cs_portforward.py | 2 +- cloud/cloudstack/cs_securitygroup.py | 2 +- cloud/cloudstack/cs_securitygroup_rule.py | 2 +- cloud/cloudstack/cs_sshkeypair.py | 2 +- cloud/cloudstack/cs_vmsnapshot.py | 2 +- cloud/google/gce_img.py | 2 +- cloud/lxc/lxc_container.py | 2 +- clustering/consul.py | 2 +- clustering/consul_acl.py | 2 +- clustering/consul_kv.py | 2 +- clustering/consul_session.py | 2 +- database/misc/mongodb_user.py | 2 +- database/mysql/mysql_replication.py | 2 +- monitoring/airbrake_deployment.py | 2 +- monitoring/boundary_meter.py | 2 +- monitoring/datadog_event.py | 2 +- monitoring/logentries.py | 4 ++-- monitoring/monit.py | 2 +- 24 files changed, 25 insertions(+), 25 deletions(-) diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index 014859cc12c..ccb29e1015f 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -25,7 +25,7 @@ short_description: Manages account on Apache CloudStack based clouds. description: - Create, disable, lock, enable and remove accounts. version_added: '2.0' -author: "René Moser (@resmo)" +author: '"René Moser (@resmo)" ' options: name: description: diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index c083cd9dc21..1a11fb537db 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -25,7 +25,7 @@ short_description: Manages affinity groups on Apache CloudStack based clouds. description: - Create and remove affinity groups. version_added: '2.0' -author: "René Moser (@resmo)" +author: '"René Moser (@resmo)" ' options: name: description: diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index d69ba12a3de..5c96d606e68 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -25,7 +25,7 @@ short_description: Manages firewall rules on Apache CloudStack based clouds. description: - Creates and removes firewall rules. version_added: '2.0' -author: "René Moser (@resmo)" +author: '"René Moser (@resmo)" ' options: ip_address: description: diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 864f7a14ed6..82d33725250 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -25,7 +25,7 @@ short_description: Manages instances and virtual machines on Apache CloudStack b description: - Deploy, start, restart, stop and destroy instances on Apache CloudStack, Citrix CloudPlatform and Exoscale. version_added: '2.0' -author: "René Moser (@resmo)" +author: '"René Moser (@resmo)" ' options: name: description: diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index a9b35147691..71aa4bfa38b 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -25,7 +25,7 @@ short_description: Manages instance groups on Apache CloudStack based clouds. description: - Create and remove instance groups. 
version_added: '2.0' -author: "René Moser (@resmo)" +author: '"René Moser (@resmo)" ' options: name: description: diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 0afdeebc8dc..1bdb2ee75cc 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -25,7 +25,7 @@ short_description: Manages ISOs images on Apache CloudStack based clouds. description: - Register and remove ISO images. version_added: '2.0' -author: "René Moser (@resmo)" +author: '"René Moser (@resmo)" ' options: name: description: diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index dc714dfb65c..74519fccb28 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -25,7 +25,7 @@ short_description: Manages port forwarding rules on Apache CloudStack based clou description: - Create, update and remove port forwarding rules. version_added: '2.0' -author: "René Moser (@resmo)" +author: '"René Moser (@resmo)" ' options: ip_address: description: diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 2ce56ff8a56..88908e559e5 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -25,7 +25,7 @@ short_description: Manages security groups on Apache CloudStack based clouds. description: - Create and remove security groups. version_added: '2.0' -author: "René Moser (@resmo)" +author: '"René Moser (@resmo)" ' options: name: description: diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index fdb566c08c6..100a92df4ef 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -25,7 +25,7 @@ short_description: Manages security group rules on Apache CloudStack based cloud description: - Add and remove security group rules. version_added: '2.0' -author: "René Moser (@resmo)" +author: '"René Moser (@resmo)" ' options: security_group: description: diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 8c38603ba1c..f24faee41d6 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -27,7 +27,7 @@ description: - If no key was found and no public key was provided and a new SSH private/public key pair will be created and the private key will be returned. version_added: '2.0' -author: "René Moser (@resmo)" +author: '"René Moser (@resmo)" ' options: name: description: diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index 868af820f99..fc11790579f 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -25,7 +25,7 @@ short_description: Manages VM snapshots on Apache CloudStack based clouds. description: - Create, remove and revert VM from snapshots. 
version_added: '2.0' -author: "René Moser (@resmo)" +author: '"René Moser (@resmo)" ' options: name: description: diff --git a/cloud/google/gce_img.py b/cloud/google/gce_img.py index 466a0580721..9cc37f8eb33 100644 --- a/cloud/google/gce_img.py +++ b/cloud/google/gce_img.py @@ -81,7 +81,7 @@ options: requirements: - "python >= 2.6" - "apache-libcloud" -author: "Peter Tan (@tanpeter)" +author: '"Peter Tan (@tanpeter)" ' ''' EXAMPLES = ''' diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index a06cd09583b..119d45069c3 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -26,7 +26,7 @@ short_description: Manage LXC Containers version_added: 1.8.0 description: - Management of LXC containers -author: "Kevin Carter (@cloudnull)" +author: '"Kevin Carter (@cloudnull)" ' options: name: description: diff --git a/clustering/consul.py b/clustering/consul.py index 1c1fa1ab4e8..9195c0ff591 100644 --- a/clustering/consul.py +++ b/clustering/consul.py @@ -42,7 +42,7 @@ requirements: - python-consul - requests version_added: "1.9" -author: "Steve Gargan (@sgargan)" +author: '"Steve Gargan (@sgargan)" ' options: state: description: diff --git a/clustering/consul_acl.py b/clustering/consul_acl.py index a7fbc16b0ca..31cb01d1404 100644 --- a/clustering/consul_acl.py +++ b/clustering/consul_acl.py @@ -30,7 +30,7 @@ requirements: - pyhcl - requests version_added: "1.9" -author: "Steve Gargan (@sgargan)" +author: '"Steve Gargan (@sgargan)" ' options: mgmt_token: description: diff --git a/clustering/consul_kv.py b/clustering/consul_kv.py index 7855368a8ab..13437d95cce 100644 --- a/clustering/consul_kv.py +++ b/clustering/consul_kv.py @@ -32,7 +32,7 @@ requirements: - python-consul - requests version_added: "1.9" -author: "Steve Gargan (@sgargan)" +author: '"Steve Gargan (@sgargan)" ' options: state: description: diff --git a/clustering/consul_session.py b/clustering/consul_session.py index 278cb4e26ed..8e7f763a21d 100644 --- a/clustering/consul_session.py +++ b/clustering/consul_session.py @@ -30,7 +30,7 @@ requirements: - python-consul - requests version_added: "1.9" -author: "Steve Gargan (@sgargan)" +author: '"Steve Gargan (@sgargan)" ' options: state: description: diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index cb4b530b646..83a3395216e 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -91,7 +91,7 @@ notes: - Requires the pymongo Python package on the remote host, version 2.4.2+. This can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html requirements: [ "pymongo" ] -author: "Elliott Foster (@elliotttf)" +author: '"Elliott Foster (@elliotttf)" ' ''' EXAMPLES = ''' diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index 2bad4c1439e..898b1510c1d 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -30,7 +30,7 @@ short_description: Manage MySQL replication description: - Manages MySQL server replication, slave, master status get and change master host. 
version_added: "1.3" -author: "Balazs Pocze (@banyek)" +author: '"Balazs Pocze (@banyek)" ' options: mode: description: diff --git a/monitoring/airbrake_deployment.py b/monitoring/airbrake_deployment.py index c3686ae699b..0036bde7daa 100644 --- a/monitoring/airbrake_deployment.py +++ b/monitoring/airbrake_deployment.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: airbrake_deployment version_added: "1.2" -author: "Bruce Pennypacker (@bpennypacker)" +author: '"Bruce Pennypacker (@bpennypacker)" ' short_description: Notify airbrake about app deployments description: - Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking) diff --git a/monitoring/boundary_meter.py b/monitoring/boundary_meter.py index daab4820408..adc2b2433e1 100644 --- a/monitoring/boundary_meter.py +++ b/monitoring/boundary_meter.py @@ -34,7 +34,7 @@ short_description: Manage boundary meters description: - This module manages boundary meters version_added: "1.3" -author: "curtis (@ccollicutt)" +author: '"curtis (@ccollicutt)" ' requirements: - Boundary API access - bprobe is required to send data, but not to register a meter diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index ed5439ac061..1d6a98dc9c3 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -14,7 +14,7 @@ description: - "Allows to post events to DataDog (www.datadoghq.com) service." - "Uses http://docs.datadoghq.com/api/#events API." version_added: "1.3" -author: "Artūras 'arturaz' Šlajus (@arturaz)" +author: '"Artūras `arturaz` Šlajus (@arturaz)" ' notes: [] requirements: [urllib2] options: diff --git a/monitoring/logentries.py b/monitoring/logentries.py index 1d511dc4e4a..75ed2e0e6dd 100644 --- a/monitoring/logentries.py +++ b/monitoring/logentries.py @@ -19,8 +19,8 @@ DOCUMENTATION = ''' --- module: logentries -author: "Ivan Vanderbyl (@ivanvanderbyl)" -short_description: Module for tracking logs via logentries.com +author: '"Ivan Vanderbyl (@ivanvanderbyl)" ' +short_description: Module for tracking logs via logentries.com description: - Sends logs to LogEntries in realtime version_added: "1.6" diff --git a/monitoring/monit.py b/monitoring/monit.py index 52e56bfc44e..e87d8edca5a 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -39,7 +39,7 @@ options: default: null choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ] requirements: [ ] -author: "Darryl Stoflet (@dstoflet)" +author: '"Darryl Stoflet (@dstoflet)" ' ''' EXAMPLES = ''' From b1cbbcef4a319afcd2baa2d6fb86a0126f95ca78 Mon Sep 17 00:00:00 2001 From: Willy Barro Date: Thu, 14 May 2015 10:14:39 -0300 Subject: [PATCH 393/720] Fix pushbullet compatibility with python 2.6 Remove dict comprehension usage. 
--- notification/pushbullet.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/notification/pushbullet.py b/notification/pushbullet.py index f7b7172d413..52d785306ce 100644 --- a/notification/pushbullet.py +++ b/notification/pushbullet.py @@ -144,7 +144,9 @@ def main(): # Search for given device if device is not None: - devices_by_nickname = {d.nickname: d for d in pb.devices} + devices_by_nickname = {} + for d in pb.devices: + devices_by_nickname[d.nickname] = d if device in devices_by_nickname: target = devices_by_nickname[device] @@ -153,7 +155,9 @@ def main(): # Search for given channel if channel is not None: - channels_by_tag = {c.channel_tag: c for c in pb.channels} + channels_by_tag = {} + for c in pb.channels: + channels_by_tag[c.channel_tag] = c if channel in channels_by_tag: target = channels_by_tag[channel] From edf519a0c2b7ef024eca81fa3e5e7d977b7f48b0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 14 May 2015 09:37:00 -0400 Subject: [PATCH 394/720] moar quotes --- monitoring/newrelic_deployment.py | 2 +- monitoring/zabbix_maintenance.py | 2 +- network/a10/a10_server.py | 2 +- network/a10/a10_service_group.py | 2 +- network/a10/a10_virtual_server.py | 2 +- network/f5/bigip_facts.py | 2 +- network/f5/bigip_monitor_http.py | 2 +- network/f5/bigip_monitor_tcp.py | 2 +- network/f5/bigip_node.py | 2 +- network/f5/bigip_pool.py | 2 +- network/f5/bigip_pool_member.py | 2 +- notification/campfire.py | 2 +- notification/flowdock.py | 2 +- packaging/language/bower.py | 2 +- packaging/language/composer.py | 2 +- packaging/language/cpanm.py | 2 +- packaging/os/dnf.py | 2 +- source_control/bzr.py | 2 +- system/alternatives.py | 4 ++-- system/at.py | 2 +- system/capabilities.py | 2 +- system/crypttab.py | 2 +- system/filesystem.py | 2 +- system/firewalld.py | 2 +- system/lvg.py | 2 +- system/lvol.py | 6 +++--- web_infrastructure/ejabberd_user.py | 2 +- 27 files changed, 30 insertions(+), 30 deletions(-) diff --git a/monitoring/newrelic_deployment.py b/monitoring/newrelic_deployment.py index 27e6f52b8e6..91a08da4871 100644 --- a/monitoring/newrelic_deployment.py +++ b/monitoring/newrelic_deployment.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: newrelic_deployment version_added: "1.2" -author: "Matt Coddington (@mcodd)" +author: '"Matt Coddington (@mcodd)" ' short_description: Notify newrelic about app deployments description: - Notify newrelic about app deployments (see http://newrelic.github.io/newrelic_api/NewRelicApi/Deployment.html) diff --git a/monitoring/zabbix_maintenance.py b/monitoring/zabbix_maintenance.py index 859e79dc8f2..02938234fbf 100644 --- a/monitoring/zabbix_maintenance.py +++ b/monitoring/zabbix_maintenance.py @@ -26,7 +26,7 @@ short_description: Create Zabbix maintenance windows description: - This module will let you create Zabbix maintenance windows. 
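These quoting patches (392 earlier and the "moar quotes" series here) matter because the author value mixes double quotes, an @-handle, and an email address in angle brackets; wrapping the whole string in single quotes makes YAML read it as one single-quoted scalar, so the documentation front matter still parses. A small sketch with a hypothetical module and author (the name, handle, and address are made up, not taken from any patch):

    # Hypothetical DOCUMENTATION block showing the quoting convention;
    # the module name, author, and email below are placeholders.
    DOCUMENTATION = '''
    ---
    module: example_module
    short_description: Illustrates quoting of the author field
    description:
      - Shows why the author value is wrapped in single quotes.
    # Single quotes keep the value one YAML scalar even though it
    # contains double quotes and an address in angle brackets:
    author: '"Jane Doe (@janedoe)" <jane.doe@example.com>'
    '''

    import yaml  # PyYAML, assumed available for the demonstration

    print(yaml.safe_load(DOCUMENTATION)['author'])
    # -> "Jane Doe (@janedoe)" <jane.doe@example.com>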
version_added: "1.8" -author: "Alexander Bulimov (@abulimov)" +author: '"Alexander Bulimov (@abulimov)" ' requirements: - "python >= 2.6" - zabbix-api diff --git a/network/a10/a10_server.py b/network/a10/a10_server.py index 58cda6c402b..2d7b8cc5d9c 100644 --- a/network/a10/a10_server.py +++ b/network/a10/a10_server.py @@ -28,7 +28,7 @@ version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices description: - Manage slb server objects on A10 Networks devices via aXAPI -author: "Mischa Peters (@mischapeters)" +author: '"Mischa Peters (@mischapeters)" ' notes: - Requires A10 Networks aXAPI 2.1 options: diff --git a/network/a10/a10_service_group.py b/network/a10/a10_service_group.py index 277021bce33..8e84bf9a07d 100644 --- a/network/a10/a10_service_group.py +++ b/network/a10/a10_service_group.py @@ -28,7 +28,7 @@ version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices description: - Manage slb service-group objects on A10 Networks devices via aXAPI -author: "Mischa Peters (@mischapeters)" +author: '"Mischa Peters (@mischapeters)" ' notes: - Requires A10 Networks aXAPI 2.1 - When a server doesn't exist and is added to the service-group the server will be created diff --git a/network/a10/a10_virtual_server.py b/network/a10/a10_virtual_server.py index 1387813c91d..3df93f67dbe 100644 --- a/network/a10/a10_virtual_server.py +++ b/network/a10/a10_virtual_server.py @@ -28,7 +28,7 @@ version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices description: - Manage slb virtual server objects on A10 Networks devices via aXAPI -author: "Mischa Peters (@mischapeters)" +author: '"Mischa Peters (@mischapeters)" ' notes: - Requires A10 Networks aXAPI 2.1 requirements: diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 3ea92ef0049..4522d61bbad 100755 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -25,7 +25,7 @@ short_description: "Collect facts from F5 BIG-IP devices" description: - "Collect facts from F5 BIG-IP devices via iControl SOAP API" version_added: "1.6" -author: "Matt Hite (@mhite)" +author: '"Matt Hite (@mhite)" ' notes: - "Requires BIG-IP software version >= 11.4" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py index 1fabc5ebd63..6a31afb2ee7 100644 --- a/network/f5/bigip_monitor_http.py +++ b/network/f5/bigip_monitor_http.py @@ -27,7 +27,7 @@ short_description: "Manages F5 BIG-IP LTM http monitors" description: - "Manages F5 BIG-IP LTM monitors via iControl SOAP API" version_added: "1.4" -author: "Serge van Ginderachter (@srvg)" +author: '"Serge van Ginderachter (@srvg)" ' notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py index 95a35ed61c8..d5855e0f15d 100644 --- a/network/f5/bigip_monitor_tcp.py +++ b/network/f5/bigip_monitor_tcp.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM tcp monitors" description: - "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API" version_added: "1.4" -author: "Serge van Ginderachter (@srvg)" +author: '"Serge van Ginderachter (@srvg)" ' notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index 
5c38a10ca9a..31e34fdeb47 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM nodes" description: - "Manages F5 BIG-IP LTM nodes via iControl SOAP API" version_added: "1.4" -author: "Matt Hite (@mhite)" +author: '"Matt Hite (@mhite)" ' notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py index 3347ef55861..2eaaf8f3a34 100644 --- a/network/f5/bigip_pool.py +++ b/network/f5/bigip_pool.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM pools" description: - "Manages F5 BIG-IP LTM pools via iControl SOAP API" version_added: "1.2" -author: "Matt Hite (@mhite)" +author: '"Matt Hite (@mhite)" ' notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index f20deb223a5..bc4b7be2f7b 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM pool members" description: - "Manages F5 BIG-IP LTM pool members via iControl SOAP API" version_added: "1.4" -author: "Matt Hite (@mhite)" +author: '"Matt Hite (@mhite)" ' notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" diff --git a/notification/campfire.py b/notification/campfire.py index f302b65bc8d..9218826a7b4 100644 --- a/notification/campfire.py +++ b/notification/campfire.py @@ -43,7 +43,7 @@ options: # informational: requirements for nodes requirements: [ urllib2, cgi ] -author: "Adam Garside (@fabulops)" +author: '"Adam Garside (@fabulops)" ' ''' EXAMPLES = ''' diff --git a/notification/flowdock.py b/notification/flowdock.py index 41a23434012..aea107457fb 100644 --- a/notification/flowdock.py +++ b/notification/flowdock.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: flowdock version_added: "1.2" -author: "Matt Coddington (@mcodd)" +author: '"Matt Coddington (@mcodd)" ' short_description: Send a message to a flowdock description: - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat) diff --git a/packaging/language/bower.py b/packaging/language/bower.py index 3687be0c671..f0dd58023ba 100644 --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -25,7 +25,7 @@ short_description: Manage bower packages with bower description: - Manage bower packages with bower version_added: 1.9 -author: "Michael Warkentin (@mwarkentin)" +author: '"Michael Warkentin (@mwarkentin)" ' options: name: description: diff --git a/packaging/language/composer.py b/packaging/language/composer.py index b6390bce4d9..5bbd948595a 100644 --- a/packaging/language/composer.py +++ b/packaging/language/composer.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: composer -author: "Dimitrios Tydeas Mengidis (@dmtrs)" +author: '"Dimitrios Tydeas Mengidis (@dmtrs)" ' short_description: Dependency Manager for PHP version_added: "1.6" description: diff --git a/packaging/language/cpanm.py b/packaging/language/cpanm.py index 5d36ec30844..5549dab8895 100644 --- a/packaging/language/cpanm.py +++ b/packaging/language/cpanm.py @@ -73,7 +73,7 @@ examples: description: Install I(Dancer) perl package from a specific mirror notes: - Please note that 
U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. -author: "Franck Cuny (@franckcuny)" +author: '"Franck Cuny (@franckcuny)" ' ''' def _is_package_installed(module, name, locallib, cpanm): diff --git a/packaging/os/dnf.py b/packaging/os/dnf.py index 305b79067d3..c76f39b1dd6 100644 --- a/packaging/os/dnf.py +++ b/packaging/os/dnf.py @@ -93,7 +93,7 @@ options: notes: [] # informational: requirements for nodes requirements: [ dnf ] -author: "Cristian van Ee (@DJMuggs)" +author: '"Cristian van Ee (@DJMuggs)" ' ''' EXAMPLES = ''' diff --git a/source_control/bzr.py b/source_control/bzr.py index bf2c873fb81..5519a8af123 100644 --- a/source_control/bzr.py +++ b/source_control/bzr.py @@ -22,7 +22,7 @@ DOCUMENTATION = u''' --- module: bzr -author: "André Paramés (@andreparames)" +author: '"André Paramés (@andreparames)" ' version_added: "1.1" short_description: Deploy software (or files) from bzr branches description: diff --git a/system/alternatives.py b/system/alternatives.py index c82eac951ad..c298afc2949 100755 --- a/system/alternatives.py +++ b/system/alternatives.py @@ -31,8 +31,8 @@ description: - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). version_added: "1.6" author: - - "David Wittman (@DavidWittman)" - - "Gabe Mulley (@mulby)" + - '"David Wittman (@DavidWittman)" ' + - '"Gabe Mulley (@mulby)" ' options: name: description: diff --git a/system/at.py b/system/at.py index fb5fbdf6900..03ac14a44aa 100644 --- a/system/at.py +++ b/system/at.py @@ -59,7 +59,7 @@ options: default: false requirements: - at -author: "Richard Isaacson (@risaacson)" +author: '"Richard Isaacson (@risaacson)" ' ''' EXAMPLES = ''' diff --git a/system/capabilities.py b/system/capabilities.py index c20cd3a9fff..0c7f2e22d0b 100644 --- a/system/capabilities.py +++ b/system/capabilities.py @@ -50,7 +50,7 @@ notes: and flags to compare, so you will want to ensure that your capabilities argument matches the final capabilities. requirements: [] -author: "Nate Coraor (@natefoo)" +author: '"Nate Coraor (@natefoo)" ' ''' EXAMPLES = ''' diff --git a/system/crypttab.py b/system/crypttab.py index 8f2f563bdfd..5b0edc62363 100644 --- a/system/crypttab.py +++ b/system/crypttab.py @@ -69,7 +69,7 @@ options: notes: [] requirements: [] -author: "Steve (@groks)" +author: '"Steve (@groks)" ' ''' EXAMPLES = ''' diff --git a/system/filesystem.py b/system/filesystem.py index 3711a27024c..a2f979ecd0b 100644 --- a/system/filesystem.py +++ b/system/filesystem.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -author: "Alexander Bulimov (@abulimov)" +author: '"Alexander Bulimov (@abulimov)" ' module: filesystem short_description: Makes file system on block device description: diff --git a/system/firewalld.py b/system/firewalld.py index efdd9611613..77cfc4b6bb8 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -69,7 +69,7 @@ options: notes: - Not tested on any debian based system. 
requirements: [ firewalld >= 0.2.11 ] -author: "Adam Miller (@maxamillion)" +author: '"Adam Miller (@maxamillion)" ' ''' EXAMPLES = ''' diff --git a/system/lvg.py b/system/lvg.py index 3b3b8b5a7c7..955b94668dc 100644 --- a/system/lvg.py +++ b/system/lvg.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- -author: "Alexander Bulimov (@abulimov)" +author: '"Alexander Bulimov (@abulimov)" ' module: lvg short_description: Configure LVM volume groups description: diff --git a/system/lvol.py b/system/lvol.py index 7fa483d318a..dc5cbb66732 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -20,9 +20,9 @@ DOCUMENTATION = ''' --- -author: - - "Jeroen Hoekx (@jhoekx)" - - "Alexander Bulimov (@abulimov)" +author: + - '"Jeroen Hoekx (@jhoekx)" ' + - '"Alexander Bulimov (@abulimov)" ' module: lvol short_description: Configure LVM logical volumes description: diff --git a/web_infrastructure/ejabberd_user.py b/web_infrastructure/ejabberd_user.py index 2031cd61b41..79fe94fcddc 100755 --- a/web_infrastructure/ejabberd_user.py +++ b/web_infrastructure/ejabberd_user.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: ejabberd_user version_added: "1.5" -author: "Peter Sprygada (@privateip)" +author: '"Peter Sprygada (@privateip)" ' short_description: Manages users for ejabberd servers requirements: - ejabberd with mod_admin_extra From 112f4262f1949f1d544357833af8e4b7d1a844b0 Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Thu, 14 May 2015 15:26:20 +0100 Subject: [PATCH 395/720] Make compatible with python 2.4 Removed one line if else --- messaging/rabbitmq_binding.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py index 2d34f0c71bf..b8adb94ec6a 100644 --- a/messaging/rabbitmq_binding.py +++ b/messaging/rabbitmq_binding.py @@ -121,12 +121,17 @@ def main(): supports_check_mode = True ) + if module.params['destinationType'] == "queue": + destType="q" + else: + destType="e" + url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % ( module.params['login_host'], module.params['login_port'], urllib.quote(module.params['vhost'],''), module.params['name'], - "q" if module.params['destinationType'] == "queue" else "e", + destType, module.params['destination'], urllib.quote(module.params['routingKey'],'') ) @@ -146,7 +151,10 @@ def main(): details = r.text ) - changeRequired = not bindingExists if module.params['state']=='present' else bindingExists + if module.params['state']=='present': + changeRequired = not bindingExists + else: + changeRequired = bindingExists # Exit if check_mode if module.check_mode: @@ -165,7 +173,7 @@ def main(): module.params['login_port'], urllib.quote(module.params['vhost'],''), module.params['name'], - "q" if module.params['destinationType'] == "queue" else "e", + destType, module.params['destination'] ) From 4882f2bbbe046d382c14d12cff3418998ad2792a Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Thu, 14 May 2015 15:30:10 +0100 Subject: [PATCH 396/720] Make compatible with python Removed one line if else --- messaging/rabbitmq_exchange.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py index f184acc024e..4cceac6b85f 100644 --- a/messaging/rabbitmq_exchange.py +++ b/messaging/rabbitmq_exchange.py @@ -150,7 +150,10 @@ def main(): details = r.text ) - changeRequired = not exchangeExists if module.params['state']=='present' else exchangeExists + if module.params['state']=='present': + changeRequired = not 
exchangeExists + else: + changeRequired = exchangeExists # Check if attributes change on existing exchange if not changeRequired and r.status_code==200 and module.params['state'] == 'present': From 6354e835d18816d5278798b5bd0ae286cee92035 Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Thu, 14 May 2015 15:36:31 +0100 Subject: [PATCH 397/720] Removed leftovers from testing the module --- messaging/rabbitmq_queue.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/messaging/rabbitmq_queue.py b/messaging/rabbitmq_queue.py index ac5743fef55..1abd9dfc2d7 100644 --- a/messaging/rabbitmq_queue.py +++ b/messaging/rabbitmq_queue.py @@ -187,9 +187,6 @@ def main(): ): module.fail_json( msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing queues", - details = "XPTO", - src = json.dumps(response['arguments']), - dest = json.dumps(module.params) ) From 08445418aa5e7aebf44f616f46bae885044bc39a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 14 May 2015 10:45:21 -0400 Subject: [PATCH 398/720] more string corrections --- cloud/misc/ovirt.py | 2 +- files/patch.py | 6 +++--- messaging/rabbitmq_parameter.py | 2 +- messaging/rabbitmq_plugin.py | 2 +- messaging/rabbitmq_policy.py | 2 +- messaging/rabbitmq_user.py | 2 +- messaging/rabbitmq_vhost.py | 2 +- monitoring/nagios.py | 2 +- network/citrix/netscaler.py | 2 +- network/openvswitch_bridge.py | 2 +- network/openvswitch_port.py | 2 +- notification/grove.py | 6 +++--- notification/irc.py | 6 +++--- notification/mail.py | 2 +- notification/nexmo.py | 2 +- packaging/language/maven_artifact.py | 2 +- packaging/language/npm.py | 2 +- packaging/os/homebrew.py | 6 +++--- packaging/os/homebrew_cask.py | 2 +- packaging/os/homebrew_tap.py | 2 +- packaging/os/layman.py | 2 +- packaging/os/openbsd_pkg.py | 2 +- packaging/os/opkg.py | 2 +- packaging/os/zypper.py | 2 +- packaging/os/zypper_repository.py | 2 +- source_control/github_hooks.py | 2 +- system/gluster_volume.py | 2 +- system/kernel_blacklist.py | 2 +- system/known_hosts.py | 2 +- system/modprobe.py | 4 ++-- system/open_iscsi.py | 2 +- web_infrastructure/jboss.py | 2 +- web_infrastructure/jira.py | 2 +- 33 files changed, 42 insertions(+), 42 deletions(-) diff --git a/cloud/misc/ovirt.py b/cloud/misc/ovirt.py index 183a2394708..718f25fec2c 100755 --- a/cloud/misc/ovirt.py +++ b/cloud/misc/ovirt.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: ovirt -author: "Vincent Van der Kussen (@vincentvdk)" +author: '"Vincent Van der Kussen (@vincentvdk)" ' short_description: oVirt/RHEV platform management description: - allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform diff --git a/files/patch.py b/files/patch.py index 809069c9bac..c2982e2380e 100755 --- a/files/patch.py +++ b/files/patch.py @@ -22,9 +22,9 @@ DOCUMENTATION = ''' --- module: patch -author: - - "Jakub Jirutka (@jirutka)" - - "Luis Alberto Perez Lazaro (@luisperlaz)" +author: + - '"Jakub Jirutka (@jirutka)" ' + - '"Luis Alberto Perez Lazaro (@luisperlaz)" ' version_added: 1.9 description: - Apply patch files using the GNU patch tool. 
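The RabbitMQ Python 2.4 compatibility patches above come down to a single syntax rule: the inline conditional expression (x if condition else y) was only added in Python 2.5, so under Python 2.4 it has to be spelled out as an ordinary if/else block. A minimal sketch of the rewrite, using made-up stand-in names (state, binding_exists) rather than the modules' real variables:

    # Stand-ins for illustration; in the modules these come from module.params
    # and from the HTTP lookup against the RabbitMQ management API.
    state = 'present'
    binding_exists = True

    # Python 2.5+ one-liner that a 2.4 interpreter rejects with a SyntaxError:
    #   change_required = not binding_exists if state == 'present' else binding_exists

    # Python 2.4-safe equivalent (the shape the patches switch to):
    if state == 'present':
        change_required = not binding_exists
    else:
        change_required = binding_exists

The same constraint is what the python2.4 compileall step in the Travis configuration enforces: the older interpreter refuses to byte-compile the one-line form at all.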
diff --git a/messaging/rabbitmq_parameter.py b/messaging/rabbitmq_parameter.py index 85b29cdfd10..6be18bdce3d 100644 --- a/messaging/rabbitmq_parameter.py +++ b/messaging/rabbitmq_parameter.py @@ -25,7 +25,7 @@ short_description: Adds or removes parameters to RabbitMQ description: - Manage dynamic, cluster-wide parameters for RabbitMQ version_added: "1.1" -author: "Chris Hoffman (@chrishoffman)" +author: '"Chris Hoffman (@chrishoffman)"' options: component: description: diff --git a/messaging/rabbitmq_plugin.py b/messaging/rabbitmq_plugin.py index 8fa1f3d0f17..db23df3fcc8 100644 --- a/messaging/rabbitmq_plugin.py +++ b/messaging/rabbitmq_plugin.py @@ -25,7 +25,7 @@ short_description: Adds or removes plugins to RabbitMQ description: - Enables or disables RabbitMQ plugins version_added: "1.1" -author: "Chris Hoffman (@chrishoffman)" +author: '"Chris Hoffman (@chrishoffman)"' options: names: description: diff --git a/messaging/rabbitmq_policy.py b/messaging/rabbitmq_policy.py index 97a800d854f..a4d94decbd1 100644 --- a/messaging/rabbitmq_policy.py +++ b/messaging/rabbitmq_policy.py @@ -26,7 +26,7 @@ short_description: Manage the state of policies in RabbitMQ. description: - Manage the state of a virtual host in RabbitMQ. version_added: "1.5" -author: "John Dewey (@retr0h)" +author: '"John Dewey (@retr0h)" ' options: name: description: diff --git a/messaging/rabbitmq_user.py b/messaging/rabbitmq_user.py index 41433c05190..6333e42282e 100644 --- a/messaging/rabbitmq_user.py +++ b/messaging/rabbitmq_user.py @@ -25,7 +25,7 @@ short_description: Adds or removes users to RabbitMQ description: - Add or remove users to RabbitMQ and assign permissions version_added: "1.1" -author: "Chris Hoffman (@chrishoffman)" +author: '"Chris Hoffman (@chrishoffman)"' options: user: description: diff --git a/messaging/rabbitmq_vhost.py b/messaging/rabbitmq_vhost.py index d1bee397caf..dbde32393cb 100644 --- a/messaging/rabbitmq_vhost.py +++ b/messaging/rabbitmq_vhost.py @@ -26,7 +26,7 @@ short_description: Manage the state of a virtual host in RabbitMQ description: - Manage the state of a virtual host in RabbitMQ version_added: "1.1" -author: "Chris Hoffman (@choffman)" +author: '"Chris Hoffman (@choffman)"' options: name: description: diff --git a/monitoring/nagios.py b/monitoring/nagios.py index bf2fded7577..64716e81c71 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -73,7 +73,7 @@ options: required: true default: null -author: "Tim Bielawa (@tbielawa)" +author: '"Tim Bielawa (@tbielawa)" ' requirements: [ "Nagios" ] ''' diff --git a/network/citrix/netscaler.py b/network/citrix/netscaler.py index a23eef15c65..8f78e23caac 100644 --- a/network/citrix/netscaler.py +++ b/network/citrix/netscaler.py @@ -82,7 +82,7 @@ options: choices: ['yes', 'no'] requirements: [ "urllib", "urllib2" ] -author: "Nandor Sivok (@dominis)" +author: '"Nandor Sivok (@dominis)" ' ''' EXAMPLES = ''' diff --git a/network/openvswitch_bridge.py b/network/openvswitch_bridge.py index 6b8119f5875..28df3e84426 100644 --- a/network/openvswitch_bridge.py +++ b/network/openvswitch_bridge.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: openvswitch_bridge version_added: 1.4 -author: "David Stygstra (@stygstra)" +author: '"David Stygstra (@stygstra)" ' short_description: Manage Open vSwitch bridges requirements: [ ovs-vsctl ] description: diff --git a/network/openvswitch_port.py b/network/openvswitch_port.py index 028300d6b70..ab87ea42b4a 100644 --- a/network/openvswitch_port.py +++ b/network/openvswitch_port.py @@ -22,7 +22,7 @@ 
DOCUMENTATION = ''' --- module: openvswitch_port version_added: 1.4 -author: "David Stygstra (@stygstra)" +author: '"David Stygstra (@stygstra)" ' short_description: Manage Open vSwitch ports requirements: [ ovs-vsctl ] description: diff --git a/notification/grove.py b/notification/grove.py index d705c000012..5c27b18c30f 100644 --- a/notification/grove.py +++ b/notification/grove.py @@ -25,11 +25,11 @@ options: required: true url: description: - - Service URL for the web client + - Service URL for the web client required: false icon_url: description: - - Icon for the service + - Icon for the service required: false validate_certs: description: @@ -39,7 +39,7 @@ options: default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 -author: "Jonas Pfenniger (@zimbatm)" +author: '"Jonas Pfenniger (@zimbatm)" ' ''' EXAMPLES = ''' diff --git a/notification/irc.py b/notification/irc.py index b55e3ec7c42..8b87c41f1ba 100644 --- a/notification/irc.py +++ b/notification/irc.py @@ -80,9 +80,9 @@ options: # informational: requirements for nodes requirements: [ socket ] -author: - - "Jan-Piet Mens (@jpmens)" - - "Matt Martz (@sivel)" +author: + - '"Jan-Piet Mens (@jpmens)"' + - '"Matt Martz (@sivel)"' ''' EXAMPLES = ''' diff --git a/notification/mail.py b/notification/mail.py index 89f1b36a0ad..4feaebf5d36 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -20,7 +20,7 @@ DOCUMENTATION = """ --- -author: "Dag Wieers (@dagwieers)" +author: '"Dag Wieers (@dagwieers)" ' module: mail short_description: Send an email description: diff --git a/notification/nexmo.py b/notification/nexmo.py index 0726b65cf83..a1dd9c2b64d 100644 --- a/notification/nexmo.py +++ b/notification/nexmo.py @@ -24,7 +24,7 @@ short_description: Send a SMS via nexmo description: - Send a SMS message via nexmo version_added: 1.6 -author: "Matt Martz (@sivel)" +author: '"Matt Martz (@sivel)" ' options: api_key: description: diff --git a/packaging/language/maven_artifact.py b/packaging/language/maven_artifact.py index 81c120b90e6..d6dd33166dc 100644 --- a/packaging/language/maven_artifact.py +++ b/packaging/language/maven_artifact.py @@ -37,7 +37,7 @@ description: - Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve - snapshots or release versions of the artifact and will resolve the latest available version if one is not - available. 
-author: "Chris Schmidt (@chrisisbeef)" +author: '"Chris Schmidt (@chrisisbeef)" ' requirements: - "python >= 2.6" - lxml diff --git a/packaging/language/npm.py b/packaging/language/npm.py index d632eae1719..3eafcd6c2a7 100644 --- a/packaging/language/npm.py +++ b/packaging/language/npm.py @@ -25,7 +25,7 @@ short_description: Manage node.js packages with npm description: - Manage node.js packages with Node Package Manager (npm) version_added: 1.2 -author: "Chris Hoffman (@chrishoffman)" +author: '"Chris Hoffman (@chrishoffman)" ' options: name: description: diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index 609682207d8..f6d63b17d3c 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -22,9 +22,9 @@ DOCUMENTATION = ''' --- module: homebrew -author: - - "Daniel Jaouen (@danieljaouen)" - - "Andrew Dunham (@andrew-d)" +author: + - '"Daniel Jaouen (@danieljaouen)" ' + - '"Andrew Dunham (@andrew-d)" ' short_description: Package manager for Homebrew description: - Manages Homebrew packages diff --git a/packaging/os/homebrew_cask.py b/packaging/os/homebrew_cask.py index 9e20149300f..292da6c7f59 100644 --- a/packaging/os/homebrew_cask.py +++ b/packaging/os/homebrew_cask.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: homebrew_cask -author: "Daniel Jaouen (@danieljaouen)" +author: '"Daniel Jaouen (@danieljaouen)" ' short_description: Install/uninstall homebrew casks. description: - Manages Homebrew casks. diff --git a/packaging/os/homebrew_tap.py b/packaging/os/homebrew_tap.py index 6402aa14ac1..1e0b6b66169 100644 --- a/packaging/os/homebrew_tap.py +++ b/packaging/os/homebrew_tap.py @@ -24,7 +24,7 @@ import re DOCUMENTATION = ''' --- module: homebrew_tap -author: "Daniel Jaouen (@danieljaouen)" +author: '"Daniel Jaouen (@danieljaouen)" ' short_description: Tap a Homebrew repository. description: - Tap external Homebrew repositories. diff --git a/packaging/os/layman.py b/packaging/os/layman.py index b4155d13b68..3cad5e35642 100644 --- a/packaging/os/layman.py +++ b/packaging/os/layman.py @@ -25,7 +25,7 @@ from urllib2 import Request, urlopen, URLError DOCUMENTATION = ''' --- module: layman -author: "Jakub Jirutka (@jirutka)" +author: '"Jakub Jirutka (@jirutka)" ' version_added: "1.6" short_description: Manage Gentoo overlays description: diff --git a/packaging/os/openbsd_pkg.py b/packaging/os/openbsd_pkg.py index 10bfbdc8f96..2f81753fb64 100644 --- a/packaging/os/openbsd_pkg.py +++ b/packaging/os/openbsd_pkg.py @@ -25,7 +25,7 @@ import syslog DOCUMENTATION = ''' --- module: openbsd_pkg -author: "Patrik Lundin (@eest)" +author: '"Patrik Lundin (@eest)" ' version_added: "1.1" short_description: Manage packages on OpenBSD. 
description: diff --git a/packaging/os/opkg.py b/packaging/os/opkg.py index 7f6c57a659c..8f06a03a1b2 100644 --- a/packaging/os/opkg.py +++ b/packaging/os/opkg.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: opkg -author: "Patrick Pelletier (@skinp)" +author: '"Patrick Pelletier (@skinp)" ' short_description: Package manager for OpenWrt description: - Manages OpenWrt packages diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index a1b2a391e99..c175c152050 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -31,7 +31,7 @@ import re DOCUMENTATION = ''' --- module: zypper -author: "Patrick Callahan (@dirtyharrycallahan)" +author: '"Patrick Callahan (@dirtyharrycallahan)" ' version_added: "1.2" short_description: Manage packages on SUSE and openSUSE description: diff --git a/packaging/os/zypper_repository.py b/packaging/os/zypper_repository.py index 4853bdaff07..3210e93d391 100644 --- a/packaging/os/zypper_repository.py +++ b/packaging/os/zypper_repository.py @@ -23,7 +23,7 @@ DOCUMENTATION = ''' --- module: zypper_repository -author: "Matthias Vogelgesang (@matze)" +author: '"Matthias Vogelgesang (@matze)" ' version_added: "1.4" short_description: Add and remove Zypper repositories description: diff --git a/source_control/github_hooks.py b/source_control/github_hooks.py index 9184a7b8229..bb60b634cb3 100644 --- a/source_control/github_hooks.py +++ b/source_control/github_hooks.py @@ -64,7 +64,7 @@ options: default: 'json' choices: ['json', 'form'] -author: "Phillip Gentry, CX Inc (@pcgentry)" +author: '"Phillip Gentry, CX Inc (@pcgentry)" ' ''' EXAMPLES = ''' diff --git a/system/gluster_volume.py b/system/gluster_volume.py index cb554b74e1c..7b83c62297f 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -103,7 +103,7 @@ options: notes: - "Requires cli tools for GlusterFS on servers" - "Will add new bricks, but not remove them" -author: "Taneli Leppä (@rosmo)" +author: '"Taneli Leppä (@rosmo)" ' """ EXAMPLES = """ diff --git a/system/kernel_blacklist.py b/system/kernel_blacklist.py index e1e8c8fcb4a..b0901473867 100644 --- a/system/kernel_blacklist.py +++ b/system/kernel_blacklist.py @@ -25,7 +25,7 @@ import re DOCUMENTATION = ''' --- module: kernel_blacklist -author: "Matthias Vogelgesang (@matze)" +author: '"Matthias Vogelgesang (@matze)" ' version_added: 1.4 short_description: Blacklist kernel modules description: diff --git a/system/known_hosts.py b/system/known_hosts.py index 2af84c07d96..74c6b0e90c7 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -51,7 +51,7 @@ options: required: no default: present requirements: [ ] -author: "Matthew Vernon (@mcv21)" +author: '"Matthew Vernon (@mcv21)" ' ''' EXAMPLES = ''' diff --git a/system/modprobe.py b/system/modprobe.py index f3b22209dd9..bf58e435552 100644 --- a/system/modprobe.py +++ b/system/modprobe.py @@ -25,8 +25,8 @@ module: modprobe short_description: Add or remove kernel modules requirements: [] version_added: 1.4 -author: - - "David Stygstra (@stygstra)" +author: + - '"David Stygstra (@stygstra)" ' - Julien Dauphant - Matt Jeffery description: diff --git a/system/open_iscsi.py b/system/open_iscsi.py index aa9271bc259..97652311f8d 100644 --- a/system/open_iscsi.py +++ b/system/open_iscsi.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: open_iscsi -author: "Serge van Ginderachter (@srvg)" +author: '"Serge van Ginderachter (@srvg)" ' version_added: "1.4" short_description: Manage iscsi targets with open-iscsi description: diff --git a/web_infrastructure/jboss.py 
b/web_infrastructure/jboss.py index 781c60b00cd..a0949c47531 100644 --- a/web_infrastructure/jboss.py +++ b/web_infrastructure/jboss.py @@ -47,7 +47,7 @@ options: notes: - "The JBoss standalone deployment-scanner has to be enabled in standalone.xml" - "Ensure no identically named application is deployed through the JBoss CLI" -author: "Jeroen Hoekx (@jhoekx)" +author: '"Jeroen Hoekx (@jhoekx)" ' """ EXAMPLES = """ diff --git a/web_infrastructure/jira.py b/web_infrastructure/jira.py index b661185a316..3dc963cb6bd 100644 --- a/web_infrastructure/jira.py +++ b/web_infrastructure/jira.py @@ -99,7 +99,7 @@ options: notes: - "Currently this only works with basic-auth." -author: "Steve Smith (@tarka)" +author: '"Steve Smith (@tarka)" ' """ EXAMPLES = """ From 8781bf828104876426f999994db210e3c0eb1c48 Mon Sep 17 00:00:00 2001 From: Chris Long Date: Fri, 15 May 2015 00:45:51 +1000 Subject: [PATCH 399/720] Updated as per bcoca's comments: removed 'default' in state: removed defunct action: removed reference to load_platform_subclass changed cname to conn_name --- network/nmcli.py | 202 ++++++++++++++++++++++------------------------- 1 file changed, 93 insertions(+), 109 deletions(-) diff --git a/network/nmcli.py b/network/nmcli.py index 0532058da3b..55edb322ad7 100644 --- a/network/nmcli.py +++ b/network/nmcli.py @@ -30,7 +30,6 @@ description: options: state: required: True - default: "present" choices: [ present, absent ] description: - Whether the device should exist or not, taking action if the state is different from what is stated. @@ -41,25 +40,14 @@ options: description: - Whether the service should start on boot. B(At least one of state and enabled are required.) - Whether the connection profile can be automatically activated ( default: yes) - action: - required: False - default: None - choices: [ add, modify, show, up, down ] - description: - - Set to 'add' if you want to add a connection. - - Set to 'modify' if you want to modify a connection. Modify one or more properties in the connection profile. - - Set to 'delete' if you want to delete a connection. Delete a configured connection. The connection to be deleted is identified by its name 'cfname'. - - Set to 'show' if you want to show a connection. Will show all devices unless 'cfname' is set. - - Set to 'up' if you want to bring a connection up. Requires 'cfname' to be set. - - Set to 'down' if you want to bring a connection down. Requires 'cfname' to be set. - cname: + conn_name: required: True default: None description: - - Where CNAME will be the name used to call the connection. when not provided a default name is generated: [-][-] + - Where conn_name will be the name used to call the connection. when not provided a default name is generated: [-][-] ifname: required: False - default: cname + default: conn_name description: - Where INAME will be the what we call the interface name. Required with 'up', 'down' modifiers. - interface to bind the connection to. The connection will only be applicable to this interface name. @@ -80,7 +68,7 @@ options: required: False default: None description: - - master Date: Fri, 15 May 2015 01:09:49 +1000 Subject: [PATCH 400/720] Fixed descriptions to all be lists replaced enabled with autoconnect - refactored code to reflect update. removed ansible syslog entry. 
--- network/nmcli.py | 66 +++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 35 deletions(-) diff --git a/network/nmcli.py b/network/nmcli.py index 55edb322ad7..18f0ecbab1f 100644 --- a/network/nmcli.py +++ b/network/nmcli.py @@ -31,25 +31,24 @@ options: state: required: True choices: [ present, absent ] - description: - - Whether the device should exist or not, taking action if the state is different from what is stated. - enabled: + description: + - Whether the device should exist or not, taking action if the state is different from what is stated. + autoconnect: required: False default: "yes" choices: [ "yes", "no" ] description: - - Whether the service should start on boot. B(At least one of state and enabled are required.) + - Whether the connection should start on boot. - Whether the connection profile can be automatically activated ( default: yes) conn_name: required: True - default: None description: - Where conn_name will be the name used to call the connection. when not provided a default name is generated: [-][-] ifname: required: False default: conn_name description: - - Where INAME will be the what we call the interface name. Required with 'up', 'down' modifiers. + - Where IFNAME will be the what we call the interface name. - interface to bind the connection to. The connection will only be applicable to this interface name. - A special value of "*" can be used for interface-independent connections. - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan. @@ -72,14 +71,17 @@ options: ip4: required: False default: None - description: The IPv4 address to this interface using this format ie: "192.168.1.24/24" + description: + - The IPv4 address to this interface using this format ie: "192.168.1.24/24" gw4: required: False - description: The IPv4 gateway for this interface using this format ie: "192.168.100.1" + description: + - The IPv4 gateway for this interface using this format ie: "192.168.100.1" dns4: required: False default: None - description: A list of upto 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ['"8.8.8.8 8.8.4.4"'] + description: + - A list of upto 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ['"8.8.8.8 8.8.4.4"'] ip6: required: False default: None @@ -88,10 +90,12 @@ options: gw6: required: False default: None - description: The IPv6 gateway for this interface using this format ie: "2001:db8::1" + description: + - The IPv6 gateway for this interface using this format ie: "2001:db8::1" dns6: required: False - description: A list of upto 3 dns servers, ipv6 format e.g. To add two IPv6 DNS server addresses: ['"2001:4860:4860::8888 2001:4860:4860::8844"'] + description: + - A list of upto 3 dns servers, ipv6 format e.g. 
To add two IPv6 DNS server addresses: ['"2001:4860:4860::8888 2001:4860:4860::8844"'] mtu: required: False default: None @@ -343,7 +347,7 @@ tenant_ip: "192.168.200.21/23" - nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present # To add an Team connection with static IP configuration, issue a command as follows -- nmcli: conn_name=my-team1 ifname=my-team1 type=team ip4=192.168.100.100/24 gw4=192.168.100.1 state=present enabled=yes +- nmcli: conn_name=my-team1 ifname=my-team1 type=team ip4=192.168.100.100/24 gw4=192.168.100.1 state=present autoconnect=yes # Optionally, at the same time specify IPv6 addresses for the device as follows: - nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 ip6=abbe::cafe gw6=2001:db8::1 state=present @@ -430,10 +434,9 @@ class Nmcli(object): def __init__(self, module): self.module=module self.state=module.params['state'] - self.enabled=module.params['enabled'] + self.autoconnect=module.params['autoconnect'] self.conn_name=module.params['conn_name'] self.master=module.params['master'] - self.autoconnect=module.params['autoconnect'] self.ifname=module.params['ifname'] self.type=module.params['type'] self.ip4=module.params['ip4'] @@ -602,9 +605,9 @@ class Nmcli(object): if self.gw6 is not None: cmd.append('gw6') cmd.append(self.gw6) - if self.enabled is not None: + if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.enabled) + cmd.append(self.autoconnect) return cmd def modify_connection_team(self): @@ -631,9 +634,9 @@ class Nmcli(object): if self.dns6 is not None: cmd.append('ipv6.dns') cmd.append(self.dns6) - if self.enabled is not None: + if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.enabled) + cmd.append(self.autoconnect) # Can't use MTU with team return cmd @@ -704,9 +707,9 @@ class Nmcli(object): if self.gw6 is not None: cmd.append('gw6') cmd.append(self.gw6) - if self.enabled is not None: + if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.enabled) + cmd.append(self.autoconnect) if self.mode is not None: cmd.append('mode') cmd.append(self.mode) @@ -751,9 +754,9 @@ class Nmcli(object): if self.dns6 is not None: cmd.append('ipv6.dns') cmd.append(self.dns6) - if self.enabled is not None: + if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.enabled) + cmd.append(self.autoconnect) return cmd def create_connection_bond_slave(self): @@ -820,9 +823,9 @@ class Nmcli(object): if self.gw6 is not None: cmd.append('gw6') cmd.append(self.gw6) - if self.enabled is not None: + if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.enabled) + cmd.append(self.autoconnect) return cmd def modify_connection_ethernet(self): @@ -855,9 +858,9 @@ class Nmcli(object): if self.mtu is not None: cmd.append('802-3-ethernet.mtu') cmd.append(self.mtu) - if self.enabled is not None: + if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.enabled) + cmd.append(self.autoconnect) return cmd def create_connection_bridge(self): @@ -966,11 +969,10 @@ def main(): # Parsing argument file module=AnsibleModule( argument_spec=dict( - enabled=dict(required=False, default=None, choices=['yes', 'no'], type='str'), + autoconnect=dict(required=False, default=None, choices=['yes', 'no'], type='str'), state=dict(required=True, choices=['present', 'absent'], type='str'), - conn_name=dict(required=False, type='str'), + conn_name=dict(required=True, type='str'), 
master=dict(required=False, default=None, type='str'), - autoconnect=dict(required=False, default=None, choices=['yes', 'no'], type='str'), ifname=dict(required=False, default=None, type='str'), type=dict(required=False, default=None, choices=['ethernet', 'team', 'team-slave', 'bond', 'bond-slave', 'bridge', 'vlan'], type='str'), ip4=dict(required=False, default=None, type='str'), @@ -1009,12 +1011,6 @@ def main(): nmcli=Nmcli(module) - if nmcli.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Nmcli instantiated - platform %s' % nmcli.platform) - if nmcli.distribution: - syslog.syslog(syslog.LOG_NOTICE, 'Nuser instantiated - distribution %s' % nmcli.distribution) - rc=None out='' err='' From c7fe644c70885f1d9f4656915e24a4ae3039ee7b Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Thu, 14 May 2015 16:37:51 +0100 Subject: [PATCH 401/720] Make compatible with python 2.4 Removed one line if else --- messaging/rabbitmq_queue.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/messaging/rabbitmq_queue.py b/messaging/rabbitmq_queue.py index 1abd9dfc2d7..1fb274328b6 100644 --- a/messaging/rabbitmq_queue.py +++ b/messaging/rabbitmq_queue.py @@ -162,7 +162,10 @@ def main(): details = r.text ) - changeRequired = not queueExists if module.params['state']=='present' else queueExists + if module.params['state']=='present': + changeRequired = not queueExists + else: + changeRequired = queueExists # Check if attributes change on existing queue if not changeRequired and r.status_code==200 and module.params['state'] == 'present': @@ -170,19 +173,24 @@ def main(): response['durable'] == module.params['durable'] and response['auto_delete'] == module.params['autoDelete'] and ( - response['arguments']['x-message-ttl'] == module.params['messageTTL'] if 'x-message-ttl' in response['arguments'] else module.params['messageTTL'] is None + ( 'x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['messageTTL'] ) or + ( 'x-message-ttl' not in response['arguments'] and module.params['messageTTL'] is None ) ) and ( - response['arguments']['x-expires'] == module.params['autoExpire'] if 'x-expires' in response['arguments'] else module.params['autoExpire'] is None + ( 'x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['autoExpire'] ) or + ( 'x-expires' not in response['arguments'] and module.params['autoExpire'] is None ) ) and ( - response['arguments']['x-max-length'] == module.params['maxLength'] if 'x-max-length' in response['arguments'] else module.params['maxLength'] is None + ( 'x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['maxLength'] ) or + ( 'x-max-length' not in response['arguments'] and module.params['maxLength'] is None ) ) and ( - response['arguments']['x-dead-letter-exchange'] == module.params['deadLetterExchange'] if 'x-dead-letter-exchange' in response['arguments'] else module.params['deadLetterExchange'] is None + ( 'x-dead-letter-exchange' in response['arguments'] and response['arguments']['x-dead-letter-exchange'] == module.params['deadLetterExchange'] ) or + ( 'x-dead-letter-exchange' not in response['arguments'] and module.params['deadLetterExchange'] is None ) ) and ( - response['arguments']['x-dead-letter-routing-key'] == module.params['deadLetterRoutingKey'] if 'x-dead-letter-routing-key' in response['arguments'] else module.params['deadLetterRoutingKey'] is None + 
( 'x-dead-letter-routing-key' in response['arguments'] and response['arguments']['x-dead-letter-routing-key'] == module.params['deadLetterRoutingKey'] ) or + ( 'x-dead-letter-routing-key' not in response['arguments'] and module.params['deadLetterRoutingKey'] is None ) ) ): module.fail_json( From 1cc23bd9a08af153ffb7ba491560c768e77646bc Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 13 May 2015 20:18:38 -0500 Subject: [PATCH 402/720] Specify that travis should use python2.6 --- .travis.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 7fda5b98133..84ec3a0983a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,11 +1,15 @@ sudo: false language: python +python: + - "2.7" addons: apt: sources: - deadsnakes packages: - python2.4 + - python2.6 script: - python2.4 -m compileall -fq -x 'cloud/|monitoring/zabbix.*\.py|/layman\.py|/maven_artifact\.py|clustering/consul.*\.py|notification/pushbullet\.py' . - - python -m compileall -fq . + - python2.6 -m compileall -fq . + - python2.7 -m compileall -fq . From 44fa32f2dcba8bcf93a68861434802094555bcfa Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Fri, 15 May 2015 00:29:53 +0100 Subject: [PATCH 403/720] Add win_environment module --- windows/win_environment.ps1 | 69 ++++++++++++++++++++++++++++++++ windows/win_environment.py | 80 +++++++++++++++++++++++++++++++++++++ 2 files changed, 149 insertions(+) create mode 100644 windows/win_environment.ps1 create mode 100644 windows/win_environment.py diff --git a/windows/win_environment.ps1 b/windows/win_environment.ps1 new file mode 100644 index 00000000000..3dc936f13ff --- /dev/null +++ b/windows/win_environment.ps1 @@ -0,0 +1,69 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2015, J Hawkesworth @jhawkesworth +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; +$result = New-Object PSObject; +Set-Attr $result "changed" $false; + +If ($params.state) { + $state = $params.state.ToString().ToLower() + If (($state -ne 'present') -and ($state -ne 'absent') ) { + Fail-Json $result "state is '$state'; must be 'present', or 'absent'" + } +} else { + $state = 'present' +} + +If ($params.name) +{ + $name = $params.name +} else { + Fail-Json $result "missing required argument: name" +} + +$value = $params.value + +If ($params.level) { + $level = $params.level.ToString().ToLower() + If (( $level -ne 'machine') -and ( $level -ne 'user' ) -and ( $level -ne 'process')) { + Fail-Json $result "level is '$level'; must be 'machine', 'user', or 'process'" + } +} + +$before_value = [Environment]::GetEnvironmentVariable($name, $level) + +if ($state -eq "present" ) { + [Environment]::SetEnvironmentVariable($name, $value, $level) +} Elseif ($state -eq "absent") { + [Environment]::SetEnvironmentVariable($name, $null, $level) +} + +$after_value = [Environment]::GetEnvironmentVariable($name, $level) + +Set-Attr $result "name" $name; +Set-Attr $result "before_value" $before_value; +Set-Attr $result "value" $after_value; +Set-Attr $result "level" $level; +if ($before_value -ne $after_value) { + Set-Attr $result "changed" $true; +} + +Exit-Json $result; diff --git a/windows/win_environment.py b/windows/win_environment.py new file mode 100644 index 00000000000..885649c4137 --- /dev/null +++ b/windows/win_environment.py @@ -0,0 +1,80 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Jon Hawkesworth +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +DOCUMENTATION = ''' +--- +module: win_environment +version_added: "2.0" +short_description: Modifies environment variables on windows guests +description: + - Uses .net Environment to set or remove environment variables. + - Can set at User, Machine or Process level. + - Note that usual rules apply, so existing environments will not change until new processes are started. +options: + state: + description: + - present to ensure environment variable is set, or absent to ensure it is removed + required: false + default: present + choices: + - present + - absent + name: + description: + - The name of the environment variable + required: true + default: no default + value: + description: + - The value to store in the environment variable. Can be omitted for state=absent + required: false + default: no default + level: + description: + - The level at which to set the environment variable. + - Use 'machine' to set for all users. + - Use 'user' to set for the current user that ansible is connected as. + - Use 'process' to set for the current process. Probably not that useful. 
+ required: true + default: no default + choices: + - machine + - process + - user +author: "Jon Hawkesworth (@jhawkesworth)" +''' + +EXAMPLES = ''' + # Set an environment variable for all users + win_environment: + state: present + name: TestVariable + value: "Test value" + level: machine + # Remove an environment variable for the current users + win_environment: + state: absent + name: TestVariable + level: user +''' + From ccd9a4eb6abd85f62d72a33ca270173ef0703e68 Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Fri, 15 May 2015 00:40:30 +0100 Subject: [PATCH 404/720] Update attribution --- windows/win_environment.ps1 | 2 +- windows/win_environment.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/windows/win_environment.ps1 b/windows/win_environment.ps1 index 3dc936f13ff..1398524cfbb 100644 --- a/windows/win_environment.ps1 +++ b/windows/win_environment.ps1 @@ -1,7 +1,7 @@ #!powershell # This file is part of Ansible # -# Copyright 2015, J Hawkesworth @jhawkesworth +# Copyright 2015, Jon Hawkesworth (@jhawkesworth) # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by diff --git a/windows/win_environment.py b/windows/win_environment.py index 885649c4137..8d4a1701695 100644 --- a/windows/win_environment.py +++ b/windows/win_environment.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2015, Jon Hawkesworth +# (c) 2015, Jon Hawkesworth (@jhawkesworth) # # This file is part of Ansible # From a057cb2482b71f94620bffd5f19392f69e41c8ca Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Fri, 15 May 2015 17:32:24 +0600 Subject: [PATCH 405/720] Initial commit of Proxmox module --- cloud/misc/proxmox.py | 164 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 164 insertions(+) create mode 100644 cloud/misc/proxmox.py diff --git a/cloud/misc/proxmox.py b/cloud/misc/proxmox.py new file mode 100644 index 00000000000..16698d3b8ac --- /dev/null +++ b/cloud/misc/proxmox.py @@ -0,0 +1,164 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
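# Illustrative sketch of the proxmoxer calls the new module below is built
# around. The API host, user and password here are placeholders, not values
# taken from the patch; only the call shapes (ProxmoxAPI(...),
# proxmox.nodes.get(), proxmox.cluster.resources.get(type='vm')) mirror the
# module code.
def _example_proxmox_queries():
    from proxmoxer import ProxmoxAPI

    # Authenticate against the cluster API -- the same constructor main() uses,
    # including the verify_ssl toggle exposed as https_verify_ssl.
    proxmox = ProxmoxAPI('pve.example.org', user='root@pam',
                         password='secret', verify_ssl=False)

    # Every node in the cluster: what node_check() scans for a matching name.
    node_names = [nd['node'] for nd in proxmox.nodes.get()]

    # Every VM/container resource: what get_instance() filters by vmid.
    vms = proxmox.cluster.resources.get(type='vm')

    return node_names, vms
# Running the sketch needs a reachable Proxmox VE API endpoint; the module
# itself additionally falls back to the PROXMOX_PASSWORD environment variable
# when api_password is not supplied.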
+ +import os +import logging + +try: + from proxmoxer import ProxmoxAPI + HAS_PROXMOXER = True +except ImportError: + HAS_PROXMOXER = False + +def get_instance(proxmox, vmid): + return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ] + +def content_check(proxmox, node, ostemplate, storage): + return [ True for cnt in proxmox.nodes(node).storage(storage).content.get() if cnt['volid'] == ostemplate ] + +def node_check(proxmox, node): + return [ True for nd in proxmox.nodes.get() if nd['node'] == node ] + +def create_instance(proxmox, vmid, node, disk, storage, cpus, memory, swap, **kwargs): + proxmox_node = proxmox.nodes(node) + logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s:%(name)s: %(message)s') + proxmox_node.openvz.create(vmid=vmid, storage=storage, memory=memory, swap=swap, + cpus=cpus, disk=disk, **kwargs) + +def main(): + module = AnsibleModule( + argument_spec = dict( + api_host = dict(required=True), + api_user = dict(required=True), + api_password = dict(), + vmid = dict(required=True), + https_verify_ssl = dict(type='bool', choices=BOOLEANS, default='no'), + node = dict(), + password = dict(), + hostname = dict(), + ostemplate = dict(), + disk = dict(dtype='int', default=3), + cpus = dict(type='int', default=1), + memory = dict(type='int', default=512), + swap = dict(type='int', default=0), + netif = dict(), + ip_address = dict(), + onboot = dict(type='bool', choices=BOOLEANS, default='no'), + storage = dict(default='local'), + cpuunits = dict(type='int', default=1000), + nameserver = dict(), + searchdomain = dict(), + force = dict(type='bool', choices=BOOLEANS, default='no'), + state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restart']), + ) + ) + + if not HAS_PROXMOXER: + module.fail_json(msg='proxmoxer required for this module') + + state = module.params['state'] + api_user = module.params['api_user'] + api_host = module.params['api_host'] + api_password = module.params['api_password'] + vmid = module.params['vmid'] + https_verify_ssl = module.params['https_verify_ssl'] + node = module.params['node'] + disk = module.params['disk'] + cpus = module.params['cpus'] + memory = module.params['memory'] + swap = module.params['swap'] + storage = module.params['storage'] + + # If password not set get it from PROXMOX_PASSWORD env + if not api_password: + try: + api_password = os.environ['PROXMOX_PASSWORD'] + except KeyError, e: + module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') + + try: + proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=https_verify_ssl) + except Exception, e: + module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) + + if state == 'present': + try: + if get_instance(proxmox, vmid) and not module.params['force']: + module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid) + elif not (node, module.params['hostname'] and module.params['password'] and module.params['ostemplate']): + module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm') + elif not node_check(proxmox, node): + module.fail_json(msg="node '%s' not exists in cluster" % node) + elif not content_check(proxmox, node, module.params['ostemplate'], storage): + module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s" + % (module.params['ostemplate'], node, storage)) + + create_instance(proxmox, vmid, node, disk, storage, 
cpus, memory, swap, + password = module.params['password'], + hostname = module.params['hostname'], + ostemplate = module.params['ostemplate'], + netif = module.params['netif'], + ip_address = module.params['ip_address'], + onboot = int(module.params['onboot']), + cpuunits = module.params['cpuunits'], + nameserver = module.params['nameserver'], + searchdomain = module.params['searchdomain'], + force = int(module.params['force'])) + + module.exit_json(changed=True, vmid=vmid) + except Exception, e: + module.fail_json(msg="creation of VM %s failed with exception: %s" % ( vmid, e )) + + elif state == 'started': + try: + vm = get_instance(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid %s not exists in cluster' % vmid) + if [ True for vm in proxmox.node(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'started' ]: + module.exit_json(changed=False, vmid=vmid) + + proxmox.nodes(vm[0]['node']).openvz(vmid).status.start.post() + module.exit_json(changed=True, vmid=vmid) + except Exception, e: + module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e )) + + elif state == 'stopped': + try: + vm = get_instance(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid %s not exists in cluster' % vmid) + if [ True for vm in proxmox.node(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'stopped' ]: + module.exit_json(changed=False, vmid=vmid) + + proxmox.nodes(vm[0]['node']).openvz(vmid).status.shutdown.post() + module.exit_json(changed=True, vmid=vmid) + except Exception, e: + module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e )) + + elif state == 'absent': + try: + vm = get_instance(proxmox, vmid) + if not vm: + module.exit_json(changed=False, vmid=vmid) + + proxmox.nodes(vm[0]['node']).openvz.delete(vmid) + module.exit_json(changed=True, vmid=vmid) + except Exception, e: + module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e )) + +# import module snippets +from ansible.module_utils.basic import * +main() From 6539add9d1177e471d3f7b6eb8b03c02d75608a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Otto=20Kek=C3=A4l=C3=A4inen?= Date: Fri, 15 May 2015 16:47:23 +0300 Subject: [PATCH 406/720] gluster_volume: Typofix in docs (equals, not colon) --- system/gluster_volume.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 7b83c62297f..7d080f8bfe6 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -108,7 +108,7 @@ author: '"Taneli Leppä (@rosmo)" ' EXAMPLES = """ - name: create gluster volume - gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster:"{{ play_hosts }}" + gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster="{{ play_hosts }}" run_once: true - name: tune @@ -127,7 +127,7 @@ EXAMPLES = """ gluster_volume: state=absent name=test1 - name: create gluster volume with multiple bricks - gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster:"{{ play_hosts }}" + gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster="{{ play_hosts }}" run_once: true """ From 73f4e2dd061d4c6f4adc80134bb2450139024916 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Otto=20Kek=C3=A4l=C3=A4inen?= Date: Fri, 15 May 2015 16:49:39 +0300 Subject: [PATCH 407/720] gluster_volume: Clarify error message to tell what actualy failed --- system/gluster_volume.py | 6 +++--- 1 file 
changed, 3 insertions(+), 3 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 7d080f8bfe6..cb7882f6c56 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -247,11 +247,11 @@ def wait_for_peer(host): time.sleep(1) return False -def probe(host): +def probe(host, myhostname): global module run_gluster([ 'peer', 'probe', host ]) if not wait_for_peer(host): - module.fail_json(msg='failed to probe peer %s' % host) + module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname)) changed = True def probe_all_peers(hosts, peers, myhostname): @@ -259,7 +259,7 @@ def probe_all_peers(hosts, peers, myhostname): if host not in peers: # dont probe ourselves if myhostname != host: - probe(host) + probe(host, myhostname) def create_volume(name, stripe, replica, transport, hosts, bricks, force): args = [ 'volume', 'create' ] From 77479882a4d67b101e844ef78a26795a17988fbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Otto=20Kek=C3=A4l=C3=A4inen?= Date: Fri, 15 May 2015 17:24:18 +0300 Subject: [PATCH 408/720] gluster_volume: Parameter expects comma separated list of hosts, passing {{play_hosts}} will fail as Python does not parse it into a list --- system/gluster_volume.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index cb7882f6c56..2ea6b974adc 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -108,7 +108,7 @@ author: '"Taneli Leppä (@rosmo)" ' EXAMPLES = """ - name: create gluster volume - gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster="{{ play_hosts }}" + gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster="192.168.1.10,192.168.1.11" run_once: true - name: tune @@ -127,7 +127,7 @@ EXAMPLES = """ gluster_volume: state=absent name=test1 - name: create gluster volume with multiple bricks - gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster="{{ play_hosts }}" + gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster="192.168.1.10,192.168.1.11" run_once: true """ From 8009bdfe77691532abe5ab37027be36fa45ab811 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Otto=20Kek=C3=A4l=C3=A4inen?= Date: Fri, 15 May 2015 17:40:30 +0300 Subject: [PATCH 409/720] gluster_volume: Improved parsing of cluster parameter list --- system/gluster_volume.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 2ea6b974adc..c5d852731c5 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -256,6 +256,7 @@ def probe(host, myhostname): def probe_all_peers(hosts, peers, myhostname): for host in hosts: + host = host.strip() # Clean up any extra space for exact comparison if host not in peers: # dont probe ourselves if myhostname != host: @@ -347,6 +348,11 @@ def main(): if not myhostname: myhostname = socket.gethostname() + # Clean up if last element is empty. 
Consider that yml can look like this: + # cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}" + if cluster != None and cluster[-1] == '': + cluster = cluster[0:-1] + if brick_paths != None and "," in brick_paths: brick_paths = brick_paths.split(",") else: From c034d080936a58f9233cf2b8a556abad017ab5c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Otto=20Kek=C3=A4l=C3=A4inen?= Date: Fri, 15 May 2015 17:55:16 +0300 Subject: [PATCH 410/720] gluster_volume: Finalize brick->bricks transition by previous author --- system/gluster_volume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index c5d852731c5..32359cd2a82 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -336,7 +336,7 @@ def main(): action = module.params['state'] volume_name = module.params['name'] cluster= module.params['cluster'] - brick_paths = module.params['brick'] + brick_paths = module.params['bricks'] stripes = module.params['stripes'] replicas = module.params['replicas'] transport = module.params['transport'] From abab60208d8be3074572dd6fdf287957584fc04e Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Fri, 15 May 2015 16:03:40 +0100 Subject: [PATCH 411/720] Change variables from camel case to underscore --- messaging/rabbitmq_exchange.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py index 4cceac6b85f..9bb8f69c75f 100644 --- a/messaging/rabbitmq_exchange.py +++ b/messaging/rabbitmq_exchange.py @@ -72,14 +72,14 @@ options: required: false choices: [ "yes", "no" ] default: yes - exchangeType: + exchange_type: description: - type for the exchange required: false choices: [ "fanout", "direct", "headers", "topic" ] aliases: [ "type" ] default: direct - autoDelete: + auto_delete: description: - if the exchange should delete itself after all queues/exchanges unbound from it required: false @@ -120,9 +120,9 @@ def main(): login_port = dict(default='15672', type='str'), vhost = dict(default='/', type='str'), durable = dict(default=True, choices=BOOLEANS, type='bool'), - autoDelete = dict(default=False, choices=BOOLEANS, type='bool'), + auto_delete = dict(default=False, choices=BOOLEANS, type='bool'), internal = dict(default=False, choices=BOOLEANS, type='bool'), - exchangeType = dict(default='direct', aliases=['type'], type='str'), + exchange_type = dict(default='direct', aliases=['type'], type='str'), arguments = dict(default=dict(), type='dict') ), supports_check_mode = True @@ -151,17 +151,17 @@ def main(): ) if module.params['state']=='present': - changeRequired = not exchangeExists + change_required = not exchangeExists else: - changeRequired = exchangeExists + change_required = exchangeExists # Check if attributes change on existing exchange - if not changeRequired and r.status_code==200 and module.params['state'] == 'present': + if not change_required and r.status_code==200 and module.params['state'] == 'present': if not ( response['durable'] == module.params['durable'] and - response['auto_delete'] == module.params['autoDelete'] and + response['auto_delete'] == module.params['auto_delete'] and response['internal'] == module.params['internal'] and - response['type'] == module.params['exchangeType'] + response['type'] == module.params['exchange_type'] ): module.fail_json( msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing exchanges" @@ -170,14 +170,14 @@ def main(): # 
Exit if check_mode if module.check_mode: module.exit_json( - changed= changeRequired, + changed= change_required, name = module.params['name'], details = response, arguments = module.params['arguments'] ) # Do changes - if changeRequired: + if change_required: if module.params['state'] == 'present': r = requests.put( url, @@ -185,9 +185,9 @@ def main(): headers = { "content-type": "application/json"}, data = json.dumps({ "durable": module.params['durable'], - "auto_delete": module.params['autoDelete'], + "auto_delete": module.params['auto_delete'], "internal": module.params['internal'], - "type": module.params['exchangeType'], + "type": module.params['exchange_type'], "arguments": module.params['arguments'] }) ) From 46eedaf36894e78a4000889e83108775a67dd345 Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Fri, 15 May 2015 16:08:10 +0100 Subject: [PATCH 412/720] Change variables from camel case to underscore --- messaging/rabbitmq_queue.py | 72 ++++++++++++++++++------------------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/messaging/rabbitmq_queue.py b/messaging/rabbitmq_queue.py index 1fb274328b6..dc79874b766 100644 --- a/messaging/rabbitmq_queue.py +++ b/messaging/rabbitmq_queue.py @@ -72,30 +72,30 @@ options: required: false choices: [ "yes", "no" ] default: yes - autoDelete: + auto_delete: description: - if the queue should delete itself after all queues/queues unbound from it required: false choices: [ "yes", "no" ] default: no - messageTTL: + message_ttl: description: - How long a message can live in queue before it is discarded (milliseconds) required: False - autoExpires: + auto_expires: description: - How long a queue can be unused before it is automatically deleted (milliseconds) required: false - maxLength: + max_length: description: - How many messages can the queue contain before it starts rejecting required: false - deadLetterExchange: + dead_letter_exchange: description: - Optional name of an exchange to which messages will be republished if they - are rejected or expire required: false - deadLetterRoutingKey: + dead_letter_routing_key: description: - Optional replacement routing key to use when a message is dead-lettered. 
- Original routing key will be used if unset @@ -129,12 +129,12 @@ def main(): login_port = dict(default='15672', type='str'), vhost = dict(default='/', type='str'), durable = dict(default=True, choices=BOOLEANS, type='bool'), - autoDelete = dict(default=False, choices=BOOLEANS, type='bool'), - messageTTL = dict(default=None, type='int'), - autoExpire = dict(default=None, type='int'), - maxLength = dict(default=None, type='int'), - deadLetterExchange = dict(default=None, type='str'), - deadLetterRoutingKey = dict(default=None, type='str'), + auto_delete = dict(default=False, choices=BOOLEANS, type='bool'), + message_ttl = dict(default=None, type='int'), + auto_expires = dict(default=None, type='int'), + max_length = dict(default=None, type='int'), + dead_letter_exchange = dict(default=None, type='str'), + dead_letter_routing_key = dict(default=None, type='str'), arguments = dict(default=dict(), type='dict') ), supports_check_mode = True @@ -151,10 +151,10 @@ def main(): r = requests.get( url, auth=(module.params['login_user'],module.params['login_password'])) if r.status_code==200: - queueExists = True + queue_exists = True response = r.json() elif r.status_code==404: - queueExists = False + queue_exists = False response = r.text else: module.fail_json( @@ -163,34 +163,34 @@ def main(): ) if module.params['state']=='present': - changeRequired = not queueExists + change_required = not queue_exists else: - changeRequired = queueExists + change_required = queue_exists # Check if attributes change on existing queue - if not changeRequired and r.status_code==200 and module.params['state'] == 'present': + if not change_required and r.status_code==200 and module.params['state'] == 'present': if not ( response['durable'] == module.params['durable'] and - response['auto_delete'] == module.params['autoDelete'] and + response['auto_delete'] == module.params['auto_delete'] and ( - ( 'x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['messageTTL'] ) or - ( 'x-message-ttl' not in response['arguments'] and module.params['messageTTL'] is None ) + ( 'x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl'] ) or + ( 'x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None ) ) and ( - ( 'x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['autoExpire'] ) or - ( 'x-expires' not in response['arguments'] and module.params['autoExpire'] is None ) + ( 'x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires'] ) or + ( 'x-expires' not in response['arguments'] and module.params['auto_expires'] is None ) ) and ( - ( 'x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['maxLength'] ) or - ( 'x-max-length' not in response['arguments'] and module.params['maxLength'] is None ) + ( 'x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length'] ) or + ( 'x-max-length' not in response['arguments'] and module.params['max_length'] is None ) ) and ( - ( 'x-dead-letter-exchange' in response['arguments'] and response['arguments']['x-dead-letter-exchange'] == module.params['deadLetterExchange'] ) or - ( 'x-dead-letter-exchange' not in response['arguments'] and module.params['deadLetterExchange'] is None ) + ( 'x-dead-letter-exchange' in response['arguments'] and response['arguments']['x-dead-letter-exchange'] == 
module.params['dead_letter_exchange'] ) or + ( 'x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None ) ) and ( - ( 'x-dead-letter-routing-key' in response['arguments'] and response['arguments']['x-dead-letter-routing-key'] == module.params['deadLetterRoutingKey'] ) or - ( 'x-dead-letter-routing-key' not in response['arguments'] and module.params['deadLetterRoutingKey'] is None ) + ( 'x-dead-letter-routing-key' in response['arguments'] and response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key'] ) or + ( 'x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None ) ) ): module.fail_json( @@ -200,11 +200,11 @@ def main(): # Copy parameters to arguments as used by RabbitMQ for k,v in { - 'messageTTL': 'x-message-ttl', - 'autoExpire': 'x-expires', - 'maxLength': 'x-max-length', - 'deadLetterExchange': 'x-dead-letter-exchange', - 'deadLetterRoutingKey': 'x-dead-letter-routing-key' + 'message_ttl': 'x-message-ttl', + 'auto_expires': 'x-expires', + 'max_length': 'x-max-length', + 'dead_letter_exchange': 'x-dead-letter-exchange', + 'dead_letter_routing_key': 'x-dead-letter-routing-key' }.items(): if module.params[k]: module.params['arguments'][v] = module.params[k] @@ -212,14 +212,14 @@ def main(): # Exit if check_mode if module.check_mode: module.exit_json( - changed= changeRequired, + changed= change_required, name = module.params['name'], details = response, arguments = module.params['arguments'] ) # Do changes - if changeRequired: + if change_required: if module.params['state'] == 'present': r = requests.put( url, @@ -227,7 +227,7 @@ def main(): headers = { "content-type": "application/json"}, data = json.dumps({ "durable": module.params['durable'], - "auto_delete": module.params['autoDelete'], + "auto_delete": module.params['auto_delete'], "arguments": module.params['arguments'] }) ) From 709817bf19bd1a108ed96f69b90ba6a375cc92ea Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Fri, 15 May 2015 16:08:47 +0100 Subject: [PATCH 413/720] Missed variable exchangeExists -> exchange_exists --- messaging/rabbitmq_exchange.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py index 9bb8f69c75f..5f6c83c10e6 100644 --- a/messaging/rabbitmq_exchange.py +++ b/messaging/rabbitmq_exchange.py @@ -139,10 +139,10 @@ def main(): r = requests.get( url, auth=(module.params['login_user'],module.params['login_password'])) if r.status_code==200: - exchangeExists = True + exchange_exists = True response = r.json() elif r.status_code==404: - exchangeExists = False + exchange_exists = False response = r.text else: module.fail_json( @@ -151,9 +151,9 @@ def main(): ) if module.params['state']=='present': - change_required = not exchangeExists + change_required = not exchange_exists else: - change_required = exchangeExists + change_required = exchange_exists # Check if attributes change on existing exchange if not change_required and r.status_code==200 and module.params['state'] == 'present': From 83e195578bae937efcf31257cba8914867ca40b9 Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Fri, 15 May 2015 16:12:43 +0100 Subject: [PATCH 414/720] Change variables from camel case to underscore --- messaging/rabbitmq_binding.py | 40 +++++++++++++++++------------------ 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py index 
b8adb94ec6a..285670bf10e 100644 --- a/messaging/rabbitmq_binding.py +++ b/messaging/rabbitmq_binding.py @@ -73,13 +73,13 @@ options: - destination exchange or queue for the binding required: true aliases: [ "dst", "dest" ] - destinationType: + destination_type: description: - Either queue or exchange required: true choices: [ "queue", "exchange" ] - aliases: [ "type", "destType" ] - routingKey: + aliases: [ "type", "dest_type" ] + routing_key: description: - routing key for the binding - default is # @@ -93,10 +93,10 @@ options: EXAMPLES = ''' # Bind myQueue to directExchange with routing key info -- rabbitmq_binding: name=directExchange destination=myQueue type=queue routingKey=info +- rabbitmq_binding: name=directExchange destination=myQueue type=queue routing_key=info # Bind directExchange to topicExchange with routing key *.info -- rabbitmq_binding: name=topicExchange destination=topicExchange type=exchange routingKey="*.info" +- rabbitmq_binding: name=topicExchange destination=topicExchange type=exchange routing_key="*.info" ''' import requests @@ -114,36 +114,36 @@ def main(): login_port = dict(default='15672', type='str'), vhost = dict(default='/', type='str'), destination = dict(required=True, aliases=[ "dst", "dest"], type='str'), - destinationType = dict(required=True, aliases=[ "type", "destType"], choices=[ "queue", "exchange" ],type='str'), - routingKey = dict(default='#', type='str'), + destination_type = dict(required=True, aliases=[ "type", "dest_type"], choices=[ "queue", "exchange" ],type='str'), + routing_key = dict(default='#', type='str'), arguments = dict(default=dict(), type='dict') ), supports_check_mode = True ) - if module.params['destinationType'] == "queue": - destType="q" + if module.params['destination_type'] == "queue": + dest_type="q" else: - destType="e" + dest_type="e" url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % ( module.params['login_host'], module.params['login_port'], urllib.quote(module.params['vhost'],''), module.params['name'], - destType, + dest_type, module.params['destination'], - urllib.quote(module.params['routingKey'],'') + urllib.quote(module.params['routing_key'],'') ) # Check if exchange already exists r = requests.get( url, auth=(module.params['login_user'],module.params['login_password'])) if r.status_code==200: - bindingExists = True + binding_exists = True response = r.json() elif r.status_code==404: - bindingExists = False + binding_exists = False response = r.text else: module.fail_json( @@ -152,28 +152,28 @@ def main(): ) if module.params['state']=='present': - changeRequired = not bindingExists + change_required = not binding_exists else: - changeRequired = bindingExists + change_required = binding_exists # Exit if check_mode if module.check_mode: module.exit_json( - changed= changeRequired, + changed= change_required, name = module.params['name'], details = response, arguments = module.params['arguments'] ) # Do changes - if changeRequired: + if change_required: if module.params['state'] == 'present': url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s" % ( module.params['login_host'], module.params['login_port'], urllib.quote(module.params['vhost'],''), module.params['name'], - destType, + dest_type, module.params['destination'] ) @@ -182,7 +182,7 @@ def main(): auth = (module.params['login_user'],module.params['login_password']), headers = { "content-type": "application/json"}, data = json.dumps({ - "routing_key": module.params['routingKey'], + "routing_key": module.params['routing_key'], "arguments": 
module.params['arguments'] }) ) From 32fb15e3106280c40afd4d574f6baa991298407d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 15 May 2015 11:52:37 -0700 Subject: [PATCH 415/720] Fix quoting for documentation build --- packaging/os/pacman.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/pacman.py b/packaging/os/pacman.py index 8aefbef8b0f..463d524ff8e 100644 --- a/packaging/os/pacman.py +++ b/packaging/os/pacman.py @@ -28,7 +28,7 @@ description: Arch Linux and its variants. version_added: "1.0" author: - - "Aaron Bull Schaefer (@elasticdog)" + - "'Aaron Bull Schaefer (@elasticdog)' " - "Afterburn" notes: [] requirements: [] From 88eff11c048f88ed9a49bf1f38a26493083d35a2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 15 May 2015 12:12:26 -0700 Subject: [PATCH 416/720] Many more doc fixes --- cloud/misc/virt.py | 6 +++--- cloud/vmware/vmware_datacenter.py | 2 +- database/misc/riak.py | 4 ++-- monitoring/rollbar_deployment.py | 2 +- monitoring/zabbix_group.py | 2 +- notification/pushover.py | 2 +- notification/sendgrid.py | 2 +- notification/slack.py | 2 +- notification/sns.py | 2 +- notification/twilio.py | 2 +- notification/typetalk.py | 2 +- packaging/os/pkg5.py | 2 +- packaging/os/pkg5_publisher.py | 2 +- packaging/os/pkgin.py | 6 +++--- packaging/os/pkgng.py | 2 +- packaging/os/pkgutil.py | 2 +- packaging/os/portinstall.py | 2 +- packaging/os/swdepot.py | 2 +- packaging/os/urpmi.py | 2 +- system/ufw.py | 8 ++++---- system/zfs.py | 2 +- windows/win_chocolatey.py | 6 +++--- windows/win_updates.py | 2 +- 23 files changed, 33 insertions(+), 33 deletions(-) diff --git a/cloud/misc/virt.py b/cloud/misc/virt.py index 540dddc3ba4..343a3eedcf7 100644 --- a/cloud/misc/virt.py +++ b/cloud/misc/virt.py @@ -58,10 +58,10 @@ options: requirements: - "python >= 2.6" - "libvirt-python" -author: +author: - "Ansible Core Team" - - "Michael DeHaan (@mpdehaan)" - - "Seth Vidal (@skvidal)" + - '"Michael DeHaan (@mpdehaan)" ' + - '"Seth Vidal (@skvidal)" ' ''' EXAMPLES = ''' diff --git a/cloud/vmware/vmware_datacenter.py b/cloud/vmware/vmware_datacenter.py index b9101fc2626..b1e995b965b 100644 --- a/cloud/vmware/vmware_datacenter.py +++ b/cloud/vmware/vmware_datacenter.py @@ -25,7 +25,7 @@ short_description: Manage VMware vSphere Datacenters description: - Manage VMware vSphere Datacenters version_added: 2.0 -author: "Joseph Callen (@jcpowermac)" +author: '"Joseph Callen (@jcpowermac)" ' notes: - Tested on vSphere 5.5 requirements: diff --git a/database/misc/riak.py b/database/misc/riak.py index f6c0d64ee42..4f10775a5ad 100644 --- a/database/misc/riak.py +++ b/database/misc/riak.py @@ -27,8 +27,8 @@ description: the status of the cluster. 
version_added: "1.2" author: - - "James Martin (@jsmartin)" - - "Drew Kerrigan (@drewkerrigan)" + - '"James Martin (@jsmartin)" ' + - '"Drew Kerrigan (@drewkerrigan)" ' options: command: description: diff --git a/monitoring/rollbar_deployment.py b/monitoring/rollbar_deployment.py index 5a643697e5b..dc064d6194d 100644 --- a/monitoring/rollbar_deployment.py +++ b/monitoring/rollbar_deployment.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: rollbar_deployment version_added: 1.6 -author: "Max Riveiro (@kavu)" +author: '"Max Riveiro (@kavu)" ' short_description: Notify Rollbar about app deployments description: - Notify Rollbar about app deployments diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py index 73aa9400a6d..f622de5a4f7 100644 --- a/monitoring/zabbix_group.py +++ b/monitoring/zabbix_group.py @@ -64,7 +64,7 @@ options: required: true notes: - The module has been tested with Zabbix Server 2.2. -author: "René Moser (@resmo)" +author: '"René Moser (@resmo)" ' ''' EXAMPLES = ''' diff --git a/notification/pushover.py b/notification/pushover.py index c4d1333e36c..951c65f43fe 100644 --- a/notification/pushover.py +++ b/notification/pushover.py @@ -48,7 +48,7 @@ options: description: Message priority (see u(https://pushover.net) for details.) required: false -author: "Jim Richardson (@weaselkeeper)" +author: '"Jim Richardson (@weaselkeeper)" ' ''' EXAMPLES = ''' diff --git a/notification/sendgrid.py b/notification/sendgrid.py index 4893fea7fe3..6278f613ee4 100644 --- a/notification/sendgrid.py +++ b/notification/sendgrid.py @@ -53,7 +53,7 @@ options: the desired subject for the email required: true -author: "Matt Makai (@makaimc)" +author: '"Matt Makai (@makaimc)" ' ''' EXAMPLES = ''' diff --git a/notification/slack.py b/notification/slack.py index 61d01c7d443..7e5215479ab 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -24,7 +24,7 @@ short_description: Send Slack notifications description: - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration version_added: 1.6 -author: "Ramon de la Fuente (@ramondelafuente)" +author: '"Ramon de la Fuente (@ramondelafuente)" ' options: domain: description: diff --git a/notification/sns.py b/notification/sns.py index 16b02e66486..910105f0ebb 100644 --- a/notification/sns.py +++ b/notification/sns.py @@ -24,7 +24,7 @@ short_description: Send Amazon Simple Notification Service (SNS) messages description: - The M(sns) module sends notifications to a topic on your Amazon SNS account version_added: 1.6 -author: "Michael J. Schultz (@mjschultz)" +author: '"Michael J. 
Schultz (@mjschultz)" ' options: msg: description: diff --git a/notification/twilio.py b/notification/twilio.py index 44366158ee1..568d0c60a58 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -58,7 +58,7 @@ options: (multimedia message) instead of a plain SMS required: false -author: "Matt Makai (@makaimc)" +author: '"Matt Makai (@makaimc)" ' ''' EXAMPLES = ''' diff --git a/notification/typetalk.py b/notification/typetalk.py index 1afd8d47045..8e79a7617ed 100644 --- a/notification/typetalk.py +++ b/notification/typetalk.py @@ -26,7 +26,7 @@ options: - message body required: true requirements: [ urllib, urllib2, json ] -author: "Takashi Someda (@tksmd)" +author: '"Takashi Someda (@tksmd)" ' ''' EXAMPLES = ''' diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index be0e24214ea..632a36796dc 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: pkg5 -author: "Peter Oliver (@mavit)" +author: '"Peter Oliver (@mavit)" ' short_description: Manages packages with the Solaris 11 Image Packaging System version_added: 1.9 description: diff --git a/packaging/os/pkg5_publisher.py b/packaging/os/pkg5_publisher.py index 43d00ce4b25..1db07d512b7 100644 --- a/packaging/os/pkg5_publisher.py +++ b/packaging/os/pkg5_publisher.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: pkg5_publisher -author: "Peter Oliver (@mavit)" +author: '"Peter Oliver (@mavit)" ' short_description: Manages Solaris 11 Image Packaging System publishers version_added: 1.9 description: diff --git a/packaging/os/pkgin.py b/packaging/os/pkgin.py index e1a973c2d30..33bcb5482f0 100644 --- a/packaging/os/pkgin.py +++ b/packaging/os/pkgin.py @@ -30,9 +30,9 @@ description: - "The standard package manager for SmartOS, but also usable on NetBSD or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))" version_added: "1.0" -author: - - "Larry Gilbert (L2G)" - - "Shaun Zinck (@szinck)" +author: + - '"Larry Gilbert (L2G)" ' + - '"Shaun Zinck (@szinck)" ' notes: - "Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package will be silently removed as diff --git a/packaging/os/pkgng.py b/packaging/os/pkgng.py index 91ee0743e10..132cff637e6 100644 --- a/packaging/os/pkgng.py +++ b/packaging/os/pkgng.py @@ -63,7 +63,7 @@ options: for newer pkgng versions, specify a the name of a repository configured in /usr/local/etc/pkg/repos required: false -author: "bleader (@bleader)" +author: '"bleader (@bleader)" ' notes: - When using pkgsite, be careful that already in cache packages won't be downloaded again. ''' diff --git a/packaging/os/pkgutil.py b/packaging/os/pkgutil.py index a735b6d9ed0..62107aa0475 100644 --- a/packaging/os/pkgutil.py +++ b/packaging/os/pkgutil.py @@ -32,7 +32,7 @@ description: - Pkgutil is an advanced packaging system, which resolves dependency on installation. It is designed for CSW packages. 
version_added: "1.3" -author: "Alexander Winkler (@dermute)" +author: '"Alexander Winkler (@dermute)" ' options: name: description: diff --git a/packaging/os/portinstall.py b/packaging/os/portinstall.py index 3c9d75767fd..1673c4dde37 100644 --- a/packaging/os/portinstall.py +++ b/packaging/os/portinstall.py @@ -43,7 +43,7 @@ options: choices: [ 'yes', 'no' ] required: false default: yes -author: "berenddeboer (@berenddeboer)" +author: '"berenddeboer (@berenddeboer)" ' ''' EXAMPLES = ''' diff --git a/packaging/os/swdepot.py b/packaging/os/swdepot.py index 74c886ca75d..56b33d401bf 100644 --- a/packaging/os/swdepot.py +++ b/packaging/os/swdepot.py @@ -29,7 +29,7 @@ description: - Will install, upgrade and remove packages with swdepot package manager (HP-UX) version_added: "1.4" notes: [] -author: "Raul Melo (@melodous)" +author: '"Raul Melo (@melodous)" ' options: name: description: diff --git a/packaging/os/urpmi.py b/packaging/os/urpmi.py index 63fe6c7f8ef..c202ee27ace 100644 --- a/packaging/os/urpmi.py +++ b/packaging/os/urpmi.py @@ -57,7 +57,7 @@ options: required: false default: yes choices: [ "yes", "no" ] -author: "Philippe Makowski (@pmakowski)" +author: '"Philippe Makowski (@pmakowski)" ' notes: [] ''' diff --git a/system/ufw.py b/system/ufw.py index f9c1f3b57e9..3694f2b937a 100644 --- a/system/ufw.py +++ b/system/ufw.py @@ -28,10 +28,10 @@ short_description: Manage firewall with UFW description: - Manage firewall with UFW. version_added: 1.6 -author: - - "Aleksey Ovcharenko (@ovcharenko)" - - "Jarno Keskikangas (@pyykkis)" - - "Ahti Kitsik (@ahtik)" +author: + - '"Aleksey Ovcharenko (@ovcharenko)" ' + - '"Jarno Keskikangas (@pyykkis)" ' + - '"Ahti Kitsik (@ahtik)" ' notes: - See C(man ufw) for more examples. requirements: diff --git a/system/zfs.py b/system/zfs.py index 7a5cc205225..fed17b4a18d 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -206,7 +206,7 @@ options: - The zoned property. required: False choices: ['on','off'] -author: "Johan Wiren (@johanwiren)" +author: '"Johan Wiren (@johanwiren)" ' ''' EXAMPLES = ''' diff --git a/windows/win_chocolatey.py b/windows/win_chocolatey.py index ad0389aa398..63ec1ecd214 100644 --- a/windows/win_chocolatey.py +++ b/windows/win_chocolatey.py @@ -86,9 +86,9 @@ options: require: false default: c:\\ansible-playbook.log aliases: [] -author: - - "Trond Hindenes (@trondhindenes)" - - "Peter Mounce (@petemounce)" +author: + - '"Trond Hindenes (@trondhindenes)" ' + - '"Peter Mounce (@petemounce)" ' ''' # TODO: diff --git a/windows/win_updates.py b/windows/win_updates.py index 49d2eac3a2e..7c93109efb9 100644 --- a/windows/win_updates.py +++ b/windows/win_updates.py @@ -41,7 +41,7 @@ options: - (anything that is a valid update category) default: critical aliases: [] -author: "Peter Mounce (@petemounce)" +author: '"Peter Mounce (@petemounce)" ' ''' EXAMPLES = ''' From 6719cbc5a29f40adf224a64313dcc5e270f2d223 Mon Sep 17 00:00:00 2001 From: Paul Bourdel Date: Fri, 15 May 2015 15:37:40 -0500 Subject: [PATCH 417/720] Rewriting to use default ansible http libraries. 
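
Instead of depending on the third-party requests package, the module now goes through Ansible's bundled HTTP helper, fetch_url from ansible.module_utils.urls. Roughly, the calling pattern looks like the sketch below; the post_message wrapper and its arguments are only illustrative, the real URL construction and v1/v2 handling are in the diff that follows.

    # Illustrative sketch of the fetch_url pattern used in this change.
    from ansible.module_utils.urls import fetch_url
    import json

    def post_message(module, url, token, body):
        # fetch_url returns a (response, info) pair; info carries the HTTP status
        headers = {'Authorization': 'Bearer %s' % token,
                   'Content-Type': 'application/json'}
        response, info = fetch_url(module, url, data=json.dumps(body),
                                   headers=headers, method='POST')
        if info['status'] == 200:
            return response.read()
        module.fail_json(msg="failed to send message, return status=%s"
                             % str(info['status']))
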
--- notification/hipchat.py | 146 +--------------------------------------- 1 file changed, 1 insertion(+), 145 deletions(-) diff --git a/notification/hipchat.py b/notification/hipchat.py index ea81d2f55f4..84e01d3eb51 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -1,145 +1 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -DOCUMENTATION = ''' ---- -module: hipchat -version_added: "1.2" -short_description: Send a message to hipchat -description: - - Send a message to hipchat -options: - token: - description: - - API token. - required: true - room: - description: - - ID or name of the room. - required: true - from: - description: - - Name the message will appear be sent from. max 15 characters. - Over 15, will be shorten. - required: false - default: Ansible - msg: - description: - - The message body. - required: true - default: null - color: - description: - - Background color for the message. Default is yellow. - required: false - default: yellow - choices: [ "yellow", "red", "green", "purple", "gray", "random" ] - msg_format: - description: - - message format. html or text. Default is text. - required: false - default: text - choices: [ "text", "html" ] - notify: - description: - - notify or not (change the tab color, play a sound, etc) - required: false - default: 'yes' - choices: [ "yes", "no" ] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - api: - description: - - API url if using a self-hosted hipchat server - required: false - default: 'https://api.hipchat.com/v2/room/{id_or_name}/message' - version_added: 1.6.0 - - -# informational: requirements for nodes -requirements: [ urllib, urllib2, requests, json ] -author: WAKAYAMA Shirou, BOURDEL Paul -''' - -EXAMPLES = ''' -- hipchat: token=AAAAAA room=notify msg="Ansible task finished" -''' - -# =========================================== -# HipChat module specific support methods. -# - -MSG_URI = "https://api.hipchat.com/v2/room/{id_or_name}/message" -NOTIFY_URI = "https://api.hipchat.com/v2/room/{id_or_name}/notification" - -def send_msg(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=MSG_URI): - '''sending message to hipchat''' - - - payload = {'message': msg, 'color': color} - url_params = {'auth_token': token} - if notify: - POST_URL = NOTIFY_URI - else: - POST_URL = MSG_URI - - response = requests.post(POST_URL.replace('{id_or_name}',room), json=payload, params=url_params) - - if response.status_code == 201 or response.status_code == 204: - return response.json - else: - module.fail_json(msg="failed to send message, return status=%s" % str(response.status_code)) - - -# =========================================== -# Module execution. 
-# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True), - room=dict(required=True), - msg=dict(required=True), - msg_from=dict(default="Ansible", aliases=['from']), - color=dict(default="yellow", choices=["yellow", "red", "green", - "purple", "gray", "random"]), - msg_format=dict(default="text", choices=["text", "html"]), - notify=dict(default=True, type='bool'), - validate_certs = dict(default='yes', type='bool'), - api = dict(default=MSG_URI), - ), - supports_check_mode=True - ) - - token = module.params["token"] - room = module.params["room"] - msg = module.params["msg"] - msg_from = module.params["msg_from"] - color = module.params["color"] - msg_format = module.params["msg_format"] - notify = module.params["notify"] - api = module.params["api"] - - try: - send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api) - except Exception, e: - module.fail_json(msg="unable to sent msg: %s" % e) - - changed = True - module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -import requests, json - -main() +#!/usr/bin/python # -*- coding: utf-8 -*- DOCUMENTATION = ''' --- module: hipchat version_added: "1.2" short_description: Send a message to hipchat description: - Send a message to hipchat options: token: description: - API token. required: true room: description: - ID or name of the room. required: true from: description: - Name the message will appear be sent from. max 15 characters. Over 15, will be shorten. required: false default: Ansible msg: description: - The message body. required: true default: null color: description: - Background color for the message. Default is yellow. required: false default: yellow choices: [ "yellow", "red", "green", "purple", "gray", "random" ] msg_format: description: - message format. html or text. Default is text. required: false default: text choices: [ "text", "html" ] notify: description: - notify or not (change the tab color, play a sound, etc) required: false default: 'yes' choices: [ "yes", "no" ] validate_certs: description: - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 api: description: - API url if using a self-hosted hipchat server required: false default: 'https://api.hipchat.com/v1' version_added: 1.6.0 # informational: requirements for nodes requirements: [ urllib, urllib2, requests, json ] author: WAKAYAMA Shirou, BOURDEL Paul ''' EXAMPLES = ''' - hipchat: token=AAAAAA room=notify msg="Ansible task finished" ''' # =========================================== # HipChat module specific support methods. 
# DEFAULT_URI = "https://api.hipchat.com/v1" MSG_URI_V1 = "/rooms/message" MSG_URI_V2 = "/room/{id_or_name}/message" NOTIFY_URI_V2 = "/room/{id_or_name}/notification" def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=MSG_URI_V1): '''sending message to hipchat v1 server''' print "Sending message to v1 server" params = {} params['room_id'] = room params['from'] = msg_from[:15] # max length is 15 params['message'] = msg params['message_format'] = msg_format params['color'] = color params['api'] = api if notify: params['notify'] = 1 else: params['notify'] = 0 url = api + MSG_URI_V1 + "?auth_token=%s" % (token) data = urllib.urlencode(params) response, info = fetch_url(module, url, data=data) if info['status'] == 200: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=MSG_URI_V2): '''sending message to hipchat v2 server''' print "Sending message to v2 server" headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'} body = dict() body['message'] = msg body['color'] = color body['message_format'] = msg_format if notify: POST_URL = api + NOTIFY_URI_V2 else: POST_URL = api + MSG_URI_V2 url = POST_URL.replace('{id_or_name}',room) data = json.dumps(body) response, info = fetch_url(module, url, data=data, headers=headers, method='POST') if info['status'] == 200: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) # =========================================== # Module execution. # def main(): module = AnsibleModule( argument_spec=dict( token=dict(required=True), room=dict(required=True), msg=dict(required=True), msg_from=dict(default="Ansible", aliases=['from']), color=dict(default="yellow", choices=["yellow", "red", "green", "purple", "gray", "random"]), msg_format=dict(default="text", choices=["text", "html"]), notify=dict(default=True, type='bool'), validate_certs = dict(default='yes', type='bool'), api = dict(default=DEFAULT_URI), ), supports_check_mode=True ) token = module.params["token"] room = module.params["room"] msg = module.params["msg"] msg_from = module.params["msg_from"] color = module.params["color"] msg_format = module.params["msg_format"] notify = module.params["notify"] api = module.params["api"] try: if api.find('/v2') != -1: send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) else: send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception, e: module.fail_json(msg="unable to sent msg: %s" % e) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * main() \ No newline at end of file From 0a1784736ccb7676141b921a11543ed736a02068 Mon Sep 17 00:00:00 2001 From: Paul Bourdel Date: Fri, 15 May 2015 15:46:08 -0500 Subject: [PATCH 418/720] adding check for module.check_mode to v2 message method --- notification/hipchat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/hipchat.py b/notification/hipchat.py index a3504c0ee10..57db6be0fd7 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -1 +1 @@ -#!/usr/bin/python # -*- coding: utf-8 -*- DOCUMENTATION = ''' --- module: hipchat version_added: "1.2" short_description: Send a message 
to hipchat description: - Send a message to hipchat options: token: description: - API token. required: true room: description: - ID or name of the room. required: true from: description: - Name the message will appear be sent from. max 15 characters. Over 15, will be shorten. required: false default: Ansible msg: description: - The message body. required: true default: null color: description: - Background color for the message. Default is yellow. required: false default: yellow choices: [ "yellow", "red", "green", "purple", "gray", "random" ] msg_format: description: - message format. html or text. Default is text. required: false default: text choices: [ "text", "html" ] notify: description: - notify or not (change the tab color, play a sound, etc) required: false default: 'yes' choices: [ "yes", "no" ] validate_certs: description: - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 api: description: - API url if using a self-hosted hipchat server required: false default: 'https://api.hipchat.com/v1' version_added: 1.6.0 # informational: requirements for nodes requirements: [ urllib, urllib2 ] author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul" ''' EXAMPLES = ''' - hipchat: token=AAAAAA room=notify msg="Ansible task finished" ''' # =========================================== # HipChat module specific support methods. # DEFAULT_URI = "https://api.hipchat.com/v1" MSG_URI_V1 = "/rooms/message" MSG_URI_V2 = "/room/{id_or_name}/message" NOTIFY_URI_V2 = "/room/{id_or_name}/notification" def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=MSG_URI_V1): '''sending message to hipchat v1 server''' print "Sending message to v1 server" params = {} params['room_id'] = room params['from'] = msg_from[:15] # max length is 15 params['message'] = msg params['message_format'] = msg_format params['color'] = color params['api'] = api if notify: params['notify'] = 1 else: params['notify'] = 0 url = api + MSG_URI_V1 + "?auth_token=%s" % (token) data = urllib.urlencode(params) if module.check_mode: # In check mode, exit before actually sending the message module.exit_json(changed=False) response, info = fetch_url(module, url, data=data) if info['status'] == 200: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=MSG_URI_V2): '''sending message to hipchat v2 server''' print "Sending message to v2 server" headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'} body = dict() body['message'] = msg body['color'] = color body['message_format'] = msg_format if notify: POST_URL = api + NOTIFY_URI_V2 else: POST_URL = api + MSG_URI_V2 url = POST_URL.replace('{id_or_name}',room) data = json.dumps(body) response, info = fetch_url(module, url, data=data, headers=headers, method='POST') if info['status'] == 200: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) # =========================================== # Module execution. 
# def main(): module = AnsibleModule( argument_spec=dict( token=dict(required=True), room=dict(required=True), msg=dict(required=True), msg_from=dict(default="Ansible", aliases=['from']), color=dict(default="yellow", choices=["yellow", "red", "green", "purple", "gray", "random"]), msg_format=dict(default="text", choices=["text", "html"]), notify=dict(default=True, type='bool'), validate_certs=dict(default='yes', type='bool'), api=dict(default=DEFAULT_URI), ), supports_check_mode=True ) token = module.params["token"] room = module.params["room"] msg = module.params["msg"] msg_from = module.params["msg_from"] color = module.params["color"] msg_format = module.params["msg_format"] notify = module.params["notify"] api = module.params["api"] try: if api.find('/v2') != -1: send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) else: send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception, e: module.fail_json(msg="unable to sent msg: %s" % e) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * main() \ No newline at end of file +#!/usr/bin/python # -*- coding: utf-8 -*- DOCUMENTATION = ''' --- module: hipchat version_added: "1.2" short_description: Send a message to hipchat description: - Send a message to hipchat options: token: description: - API token. required: true room: description: - ID or name of the room. required: true from: description: - Name the message will appear be sent from. max 15 characters. Over 15, will be shorten. required: false default: Ansible msg: description: - The message body. required: true default: null color: description: - Background color for the message. Default is yellow. required: false default: yellow choices: [ "yellow", "red", "green", "purple", "gray", "random" ] msg_format: description: - message format. html or text. Default is text. required: false default: text choices: [ "text", "html" ] notify: description: - notify or not (change the tab color, play a sound, etc) required: false default: 'yes' choices: [ "yes", "no" ] validate_certs: description: - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 api: description: - API url if using a self-hosted hipchat server required: false default: 'https://api.hipchat.com/v1' version_added: 1.6.0 # informational: requirements for nodes requirements: [ urllib, urllib2 ] author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul" ''' EXAMPLES = ''' - hipchat: token=AAAAAA room=notify msg="Ansible task finished" ''' # =========================================== # HipChat module specific support methods. 
# DEFAULT_URI = "https://api.hipchat.com/v1" MSG_URI_V1 = "/rooms/message" MSG_URI_V2 = "/room/{id_or_name}/message" NOTIFY_URI_V2 = "/room/{id_or_name}/notification" def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=MSG_URI_V1): '''sending message to hipchat v1 server''' print "Sending message to v1 server" params = {} params['room_id'] = room params['from'] = msg_from[:15] # max length is 15 params['message'] = msg params['message_format'] = msg_format params['color'] = color params['api'] = api if notify: params['notify'] = 1 else: params['notify'] = 0 url = api + MSG_URI_V1 + "?auth_token=%s" % (token) data = urllib.urlencode(params) if module.check_mode: # In check mode, exit before actually sending the message module.exit_json(changed=False) response, info = fetch_url(module, url, data=data) if info['status'] == 200: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=MSG_URI_V2): '''sending message to hipchat v2 server''' print "Sending message to v2 server" headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'} body = dict() body['message'] = msg body['color'] = color body['message_format'] = msg_format if notify: POST_URL = api + NOTIFY_URI_V2 else: POST_URL = api + MSG_URI_V2 url = POST_URL.replace('{id_or_name}',room) data = json.dumps(body) if module.check_mode: # In check mode, exit before actually sending the message module.exit_json(changed=False) response, info = fetch_url(module, url, data=data, headers=headers, method='POST') if info['status'] == 200: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) # =========================================== # Module execution. 
# def main(): module = AnsibleModule( argument_spec=dict( token=dict(required=True), room=dict(required=True), msg=dict(required=True), msg_from=dict(default="Ansible", aliases=['from']), color=dict(default="yellow", choices=["yellow", "red", "green", "purple", "gray", "random"]), msg_format=dict(default="text", choices=["text", "html"]), notify=dict(default=True, type='bool'), validate_certs=dict(default='yes', type='bool'), api=dict(default=DEFAULT_URI), ), supports_check_mode=True ) token = module.params["token"] room = module.params["room"] msg = module.params["msg"] msg_from = module.params["msg_from"] color = module.params["color"] msg_format = module.params["msg_format"] notify = module.params["notify"] api = module.params["api"] try: if api.find('/v2') != -1: send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) else: send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception, e: module.fail_json(msg="unable to sent msg: %s" % e) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * main() \ No newline at end of file From 98e3ee36b7c85763a00c5ea5645672fc6ad9dd03 Mon Sep 17 00:00:00 2001 From: Paul Bourdel Date: Fri, 15 May 2015 15:54:07 -0500 Subject: [PATCH 419/720] adding new line to end of file --- notification/hipchat.py | 197 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 196 insertions(+), 1 deletion(-) diff --git a/notification/hipchat.py b/notification/hipchat.py index 57db6be0fd7..a7bf1fdc69f 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -1 +1,196 @@ -#!/usr/bin/python # -*- coding: utf-8 -*- DOCUMENTATION = ''' --- module: hipchat version_added: "1.2" short_description: Send a message to hipchat description: - Send a message to hipchat options: token: description: - API token. required: true room: description: - ID or name of the room. required: true from: description: - Name the message will appear be sent from. max 15 characters. Over 15, will be shorten. required: false default: Ansible msg: description: - The message body. required: true default: null color: description: - Background color for the message. Default is yellow. required: false default: yellow choices: [ "yellow", "red", "green", "purple", "gray", "random" ] msg_format: description: - message format. html or text. Default is text. required: false default: text choices: [ "text", "html" ] notify: description: - notify or not (change the tab color, play a sound, etc) required: false default: 'yes' choices: [ "yes", "no" ] validate_certs: description: - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 api: description: - API url if using a self-hosted hipchat server required: false default: 'https://api.hipchat.com/v1' version_added: 1.6.0 # informational: requirements for nodes requirements: [ urllib, urllib2 ] author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul" ''' EXAMPLES = ''' - hipchat: token=AAAAAA room=notify msg="Ansible task finished" ''' # =========================================== # HipChat module specific support methods. 
# DEFAULT_URI = "https://api.hipchat.com/v1" MSG_URI_V1 = "/rooms/message" MSG_URI_V2 = "/room/{id_or_name}/message" NOTIFY_URI_V2 = "/room/{id_or_name}/notification" def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=MSG_URI_V1): '''sending message to hipchat v1 server''' print "Sending message to v1 server" params = {} params['room_id'] = room params['from'] = msg_from[:15] # max length is 15 params['message'] = msg params['message_format'] = msg_format params['color'] = color params['api'] = api if notify: params['notify'] = 1 else: params['notify'] = 0 url = api + MSG_URI_V1 + "?auth_token=%s" % (token) data = urllib.urlencode(params) if module.check_mode: # In check mode, exit before actually sending the message module.exit_json(changed=False) response, info = fetch_url(module, url, data=data) if info['status'] == 200: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=MSG_URI_V2): '''sending message to hipchat v2 server''' print "Sending message to v2 server" headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'} body = dict() body['message'] = msg body['color'] = color body['message_format'] = msg_format if notify: POST_URL = api + NOTIFY_URI_V2 else: POST_URL = api + MSG_URI_V2 url = POST_URL.replace('{id_or_name}',room) data = json.dumps(body) if module.check_mode: # In check mode, exit before actually sending the message module.exit_json(changed=False) response, info = fetch_url(module, url, data=data, headers=headers, method='POST') if info['status'] == 200: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) # =========================================== # Module execution. # def main(): module = AnsibleModule( argument_spec=dict( token=dict(required=True), room=dict(required=True), msg=dict(required=True), msg_from=dict(default="Ansible", aliases=['from']), color=dict(default="yellow", choices=["yellow", "red", "green", "purple", "gray", "random"]), msg_format=dict(default="text", choices=["text", "html"]), notify=dict(default=True, type='bool'), validate_certs=dict(default='yes', type='bool'), api=dict(default=DEFAULT_URI), ), supports_check_mode=True ) token = module.params["token"] room = module.params["room"] msg = module.params["msg"] msg_from = module.params["msg_from"] color = module.params["color"] msg_format = module.params["msg_format"] notify = module.params["notify"] api = module.params["api"] try: if api.find('/v2') != -1: send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) else: send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception, e: module.fail_json(msg="unable to sent msg: %s" % e) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * main() \ No newline at end of file +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: hipchat +version_added: "1.2" +short_description: Send a message to hipchat +description: + - Send a message to hipchat +options: + token: + description: + - API token. + required: true + room: + description: + - ID or name of the room. 
+ required: true + from: + description: + - Name the message will appear be sent from. max 15 characters. + Over 15, will be shorten. + required: false + default: Ansible + msg: + description: + - The message body. + required: true + default: null + color: + description: + - Background color for the message. Default is yellow. + required: false + default: yellow + choices: [ "yellow", "red", "green", "purple", "gray", "random" ] + msg_format: + description: + - message format. html or text. Default is text. + required: false + default: text + choices: [ "text", "html" ] + notify: + description: + - notify or not (change the tab color, play a sound, etc) + required: false + default: 'yes' + choices: [ "yes", "no" ] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 + api: + description: + - API url if using a self-hosted hipchat server + required: false + default: 'https://api.hipchat.com/v1' + version_added: 1.6.0 + + +# informational: requirements for nodes +requirements: [ urllib, urllib2 ] +author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul" +''' + +EXAMPLES = ''' +- hipchat: token=AAAAAA room=notify msg="Ansible task finished" +''' + +# =========================================== +# HipChat module specific support methods. +# + +DEFAULT_URI = "https://api.hipchat.com/v1" + +MSG_URI_V1 = "/rooms/message" + +MSG_URI_V2 = "/room/{id_or_name}/message" +NOTIFY_URI_V2 = "/room/{id_or_name}/notification" + +def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', + color='yellow', notify=False, api=MSG_URI_V1): + '''sending message to hipchat v1 server''' + print "Sending message to v1 server" + + params = {} + params['room_id'] = room + params['from'] = msg_from[:15] # max length is 15 + params['message'] = msg + params['message_format'] = msg_format + params['color'] = color + params['api'] = api + + if notify: + params['notify'] = 1 + else: + params['notify'] = 0 + + url = api + MSG_URI_V1 + "?auth_token=%s" % (token) + data = urllib.urlencode(params) + + if module.check_mode: + # In check mode, exit before actually sending the message + module.exit_json(changed=False) + + response, info = fetch_url(module, url, data=data) + if info['status'] == 200: + return response.read() + else: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + + + +def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', + color='yellow', notify=False, api=MSG_URI_V2): + '''sending message to hipchat v2 server''' + print "Sending message to v2 server" + + headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'} + + body = dict() + body['message'] = msg + body['color'] = color + body['message_format'] = msg_format + + if notify: + POST_URL = api + NOTIFY_URI_V2 + else: + POST_URL = api + MSG_URI_V2 + + url = POST_URL.replace('{id_or_name}',room) + data = json.dumps(body) + + if module.check_mode: + # In check mode, exit before actually sending the message + module.exit_json(changed=False) + + response, info = fetch_url(module, url, data=data, headers=headers, method='POST') + if info['status'] == 200: + return response.read() + else: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + + +# =========================================== +# Module execution. 
+# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True), + room=dict(required=True), + msg=dict(required=True), + msg_from=dict(default="Ansible", aliases=['from']), + color=dict(default="yellow", choices=["yellow", "red", "green", + "purple", "gray", "random"]), + msg_format=dict(default="text", choices=["text", "html"]), + notify=dict(default=True, type='bool'), + validate_certs=dict(default='yes', type='bool'), + api=dict(default=DEFAULT_URI), + ), + supports_check_mode=True + ) + + token = module.params["token"] + room = module.params["room"] + msg = module.params["msg"] + msg_from = module.params["msg_from"] + color = module.params["color"] + msg_format = module.params["msg_format"] + notify = module.params["notify"] + api = module.params["api"] + + try: + if api.find('/v2') != -1: + send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) + else: + send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) + except Exception, e: + module.fail_json(msg="unable to sent msg: %s" % e) + + changed = True + module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() + From 452a590af8e336c1a601a4ec8e8a23a48ca5e50a Mon Sep 17 00:00:00 2001 From: Paul Bourdel Date: Fri, 15 May 2015 15:54:53 -0500 Subject: [PATCH 420/720] adding author username --- notification/hipchat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/hipchat.py b/notification/hipchat.py index a7bf1fdc69f..eb4ab711afc 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -64,7 +64,7 @@ options: # informational: requirements for nodes requirements: [ urllib, urllib2 ] -author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul" +author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)" ''' EXAMPLES = ''' From aa93ff944aafdc788e36d514ef18bf074807eae7 Mon Sep 17 00:00:00 2001 From: Paul Bourdel Date: Fri, 15 May 2015 15:56:06 -0500 Subject: [PATCH 421/720] fixing grammar on error message --- notification/hipchat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/hipchat.py b/notification/hipchat.py index eb4ab711afc..81ae2724117 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -183,7 +183,7 @@ def main(): else: send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception, e: - module.fail_json(msg="unable to sent msg: %s" % e) + module.fail_json(msg="unable to send msg: %s" % e) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) From 9c0cf447bda034e36d4c18fa521b4bd396abb743 Mon Sep 17 00:00:00 2001 From: Paul Bourdel Date: Fri, 15 May 2015 15:57:10 -0500 Subject: [PATCH 422/720] removing unnecessary white space --- notification/hipchat.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/notification/hipchat.py b/notification/hipchat.py index 81ae2724117..2498c11848c 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -114,7 +114,6 @@ def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=MSG_URI_V2): '''sending message to hipchat v2 server''' @@ -193,4 +192,3 @@ from ansible.module_utils.basic import * from 
ansible.module_utils.urls import * main() - From 9f41d7834656352529dd3827bd00ab88d218853e Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 13 May 2015 23:26:27 +0200 Subject: [PATCH 423/720] cloudstack: avoid logging of secrets --- cloud/cloudstack/cs_account.py | 2 +- cloud/cloudstack/cs_affinitygroup.py | 2 +- cloud/cloudstack/cs_firewall.py | 2 +- cloud/cloudstack/cs_instance.py | 2 +- cloud/cloudstack/cs_instancegroup.py | 2 +- cloud/cloudstack/cs_iso.py | 2 +- cloud/cloudstack/cs_portforward.py | 2 +- cloud/cloudstack/cs_securitygroup.py | 2 +- cloud/cloudstack/cs_securitygroup_rule.py | 2 +- cloud/cloudstack/cs_sshkeypair.py | 2 +- cloud/cloudstack/cs_vmsnapshot.py | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index ccb29e1015f..dd47a3f93d3 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -366,7 +366,7 @@ def main(): timezone = dict(default=None), poll_async = dict(choices=BOOLEANS, default=True), api_key = dict(default=None), - api_secret = dict(default=None), + api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), ), diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 1a11fb537db..617a50ee010 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -201,7 +201,7 @@ def main(): state = dict(choices=['present', 'absent'], default='present'), poll_async = dict(choices=BOOLEANS, default=True), api_key = dict(default=None), - api_secret = dict(default=None), + api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), ), diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 5c96d606e68..de38233a12f 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -303,7 +303,7 @@ def main(): account = dict(default=None), project = dict(default=None), api_key = dict(default=None), - api_secret = dict(default=None), + api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), ), diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 82d33725250..1d7e2492216 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -785,7 +785,7 @@ def main(): tags = dict(type='list', aliases=[ 'tag' ], default=None), poll_async = dict(choices=BOOLEANS, default=True), api_key = dict(default=None), - api_secret = dict(default=None), + api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), ), diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index 71aa4bfa38b..d74648d2313 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -197,7 +197,7 @@ def main(): account = dict(default=None), project = dict(default=None), api_key = dict(default=None), - api_secret = dict(default=None), + api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), ), diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 1bdb2ee75cc..9dcd46ea539 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -293,7 +293,7 @@ def main(): is_dynamically_scalable = dict(choices=BOOLEANS, default=False), state = 
dict(choices=['present', 'absent'], default='present'), api_key = dict(default=None), - api_secret = dict(default=None), + api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), ), diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 74519fccb28..dd5ecb12bda 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -402,7 +402,7 @@ def main(): project = dict(default=None), poll_async = dict(choices=BOOLEANS, default=True), api_key = dict(default=None), - api_secret = dict(default=None), + api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), ), diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 88908e559e5..2964a918f1f 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -167,7 +167,7 @@ def main(): state = dict(choices=['present', 'absent'], default='present'), project = dict(default=None), api_key = dict(default=None), - api_secret = dict(default=None), + api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), ), diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 100a92df4ef..76ddf7207fc 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -401,7 +401,7 @@ def main(): project = dict(default=None), poll_async = dict(choices=BOOLEANS, default=True), api_key = dict(default=None), - api_secret = dict(default=None), + api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), ), diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index f24faee41d6..1e3233dd76e 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -219,7 +219,7 @@ def main(): project = dict(default=None), state = dict(choices=['present', 'absent'], default='present'), api_key = dict(default=None), - api_secret = dict(default=None), + api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), ), diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index fc11790579f..1321d35da82 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -292,7 +292,7 @@ def main(): project = dict(default=None), poll_async = dict(choices=BOOLEANS, default=True), api_key = dict(default=None), - api_secret = dict(default=None), + api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), ), From 63e9014e16d3fead0eeb899b8936141d277ca89a Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 13 May 2015 23:51:23 +0200 Subject: [PATCH 424/720] cloudstack: cs_affinitygroup: add domain and account support --- cloud/cloudstack/cs_affinitygroup.py | 42 +++++++++++++++++++++------- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 617a50ee010..119e875c5bb 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -47,6 +47,16 @@ options: required: false default: 'present' choices: [ 'present', 'absent' ] + domain: + description: + - Domain the affinity group is related to. 
+ required: false + default: null + account: + description: + - Account the affinity group is related to. + required: false + default: null poll_async: description: - Poll async jobs until job has finished. @@ -112,12 +122,16 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): def get_affinity_group(self): if not self.affinity_group: - affinity_group_name = self.module.params.get('name') + affinity_group = self.module.params.get('name') + + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') - affinity_groups = self.cs.listAffinityGroups() + affinity_groups = self.cs.listAffinityGroups(**args) if affinity_groups: for a in affinity_groups['affinitygroup']: - if a['name'] == affinity_group_name: + if affinity_group in [ a['name'], a['id'] ]: self.affinity_group = a break return self.affinity_group @@ -142,10 +156,12 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): if not affinity_group: self.result['changed'] = True - args = {} - args['name'] = self.module.params.get('name') - args['type'] = self.get_affinity_type() + args = {} + args['name'] = self.module.params.get('name') + args['type'] = self.get_affinity_type() args['description'] = self.module.params.get('description') + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') if not self.module.check_mode: res = self.cs.createAffinityGroup(**args) @@ -156,7 +172,6 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): poll_async = self.module.params.get('poll_async') if res and poll_async: affinity_group = self._poll_job(res, 'affinitygroup') - return affinity_group @@ -165,8 +180,10 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): if affinity_group: self.result['changed'] = True - args = {} - args['name'] = self.module.params.get('name') + args = {} + args['name'] = self.module.params.get('name') + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') if not self.module.check_mode: res = self.cs.deleteAffinityGroup(**args) @@ -177,7 +194,6 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): poll_async = self.module.params.get('poll_async') if res and poll_async: res = self._poll_job(res, 'affinitygroup') - return affinity_group @@ -189,6 +205,10 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): self.result['description'] = affinity_group['description'] if 'type' in affinity_group: self.result['affinity_type'] = affinity_group['type'] + if 'domain' in affinity_group: + self.result['domain'] = affinity_group['domain'] + if 'account' in affinity_group: + self.result['account'] = affinity_group['account'] return self.result @@ -199,6 +219,8 @@ def main(): affinty_type = dict(default=None), description = dict(default=None), state = dict(choices=['present', 'absent'], default='present'), + domain = dict(default=None), + account = dict(default=None), poll_async = dict(choices=BOOLEANS, default=True), api_key = dict(default=None), api_secret = dict(default=None, no_log=True), From 7705d1bb503c20a57522bb47939b4f1b71ff5b15 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Thu, 14 May 2015 00:03:17 +0200 Subject: [PATCH 425/720] cloudstack: remove self.result, is defined in super class from utils --- cloud/cloudstack/cs_account.py | 1 - cloud/cloudstack/cs_affinitygroup.py | 4 ---- cloud/cloudstack/cs_firewall.py | 4 ---- cloud/cloudstack/cs_instance.py | 1 - cloud/cloudstack/cs_instancegroup.py | 1 - cloud/cloudstack/cs_iso.py | 4 ---- cloud/cloudstack/cs_portforward.py | 2 -- 
cloud/cloudstack/cs_securitygroup.py | 4 ---- cloud/cloudstack/cs_securitygroup_rule.py | 3 --- cloud/cloudstack/cs_sshkeypair.py | 4 ---- cloud/cloudstack/cs_vmsnapshot.py | 4 ---- 11 files changed, 32 deletions(-) diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index dd47a3f93d3..313c786dee3 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -97,7 +97,6 @@ extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' ---- # create an account in domain 'CUSTOMERS' local_action: module: cs_account diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 119e875c5bb..e4460b93695 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -66,7 +66,6 @@ extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' ---- # Create a affinity group - local_action: module: cs_affinitygroup @@ -114,9 +113,6 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): def __init__(self, module): AnsibleCloudStack.__init__(self, module) - self.result = { - 'changed': False, - } self.affinity_group = None diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index de38233a12f..012005432fc 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -88,7 +88,6 @@ extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' ---- # Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1 - local_action: module: cs_firewall @@ -171,9 +170,6 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): def __init__(self, module): AnsibleCloudStack.__init__(self, module) - self.result = { - 'changed': False, - } self.firewall_rule = None diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 1d7e2492216..0b2c9d4c935 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -164,7 +164,6 @@ extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' ---- # Create a instance on CloudStack from an ISO # NOTE: Names of offerings and ISOs depending on the CloudStack configuration. - local_action: diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index d74648d2313..dc216733c63 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -56,7 +56,6 @@ extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' ---- # Create an instance group - local_action: module: cs_instancegroup diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 9dcd46ea539..fe92a6baca2 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -99,7 +99,6 @@ extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' ---- # Register an ISO if ISO name does not already exist. 
- local_action: module: cs_iso @@ -185,9 +184,6 @@ class AnsibleCloudStackIso(AnsibleCloudStack): def __init__(self, module): AnsibleCloudStack.__init__(self, module) - self.result = { - 'changed': False, - } self.iso = None def register_iso(self): diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index dd5ecb12bda..127979e3d79 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -106,7 +106,6 @@ extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' ---- # 1.2.3.4:80 -> web01:8080 - local_action: module: cs_portforward @@ -144,7 +143,6 @@ EXAMPLES = ''' public_port: 22 private_port: 22 state: absent - ''' RETURN = ''' diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 2964a918f1f..35d8851bdef 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -51,7 +51,6 @@ extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' ---- # Create a security group - local_action: module: cs_securitygroup @@ -94,9 +93,6 @@ class AnsibleCloudStackSecurityGroup(AnsibleCloudStack): def __init__(self, module): AnsibleCloudStack.__init__(self, module) - self.result = { - 'changed': False, - } self.security_group = None diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 76ddf7207fc..c294e062007 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -194,9 +194,6 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): def __init__(self, module): AnsibleCloudStack.__init__(self, module) - self.result = { - 'changed': False, - } def _tcp_udp_match(self, rule, protocol, start_port, end_port): diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 1e3233dd76e..995e0b5b81a 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -63,7 +63,6 @@ extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' ---- # create a new private / public key pair: - local_action: cs_sshkeypair name=linus@example.com register: key @@ -114,9 +113,6 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): def __init__(self, module): AnsibleCloudStack.__init__(self, module) - self.result = { - 'changed': False, - } self.ssh_key = None diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index 1321d35da82..d5d84bd8ea7 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -81,7 +81,6 @@ extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' ---- # Create a VM snapshot of disk and memory before an upgrade - local_action: module: cs_vmsnapshot @@ -175,9 +174,6 @@ class AnsibleCloudStackVmSnapshot(AnsibleCloudStack): def __init__(self, module): AnsibleCloudStack.__init__(self, module) - self.result = { - 'changed': False, - } def get_snapshot(self): From 2b7a40a46a01a52ffcf78803c7030d01cdf350d5 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Thu, 14 May 2015 00:18:10 +0200 Subject: [PATCH 426/720] cloudstack: replace old _id() with new generic style from utils --- cloud/cloudstack/cs_instance.py | 2 +- cloud/cloudstack/cs_iso.py | 14 +++++++------- cloud/cloudstack/cs_securitygroup.py | 6 +++--- cloud/cloudstack/cs_securitygroup_rule.py | 4 ++-- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 0b2c9d4c935..d8db6b683c8 100644 --- 
a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -536,7 +536,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args_instance_update['group'] = self.module.params.get('group') args_instance_update['displayname'] = self.get_display_name() args_instance_update['userdata'] = self.get_user_data() - args_instance_update['ostypeid'] = self.get_os_type_id() + args_instance_update['ostypeid'] = self.get_os_type('id') args_ssh_key = {} args_ssh_key['id'] = instance['id'] diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index fe92a6baca2..b0048c75f45 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -190,11 +190,11 @@ class AnsibleCloudStackIso(AnsibleCloudStack): iso = self.get_iso() if not iso: args = {} - args['zoneid'] = self.get_zone_id() - args['projectid'] = self.get_project_id() + args['zoneid'] = self.get_zone('id') + args['projectid'] = self.get_project('id') args['bootable'] = self.module.params.get('bootable') - args['ostypeid'] = self.get_os_type_id() + args['ostypeid'] = self.get_os_type('id') if args['bootable'] and not args['ostypeid']: self.module.fail_json(msg="OS type 'os_type' is requried if 'bootable=true'.") @@ -221,8 +221,8 @@ class AnsibleCloudStackIso(AnsibleCloudStack): args = {} args['isready'] = self.module.params.get('is_ready') args['isofilter'] = self.module.params.get('iso_filter') - args['projectid'] = self.get_project_id() - args['zoneid'] = self.get_zone_id() + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') # if checksum is set, we only look on that. checksum = self.module.params.get('checksum') @@ -247,8 +247,8 @@ class AnsibleCloudStackIso(AnsibleCloudStack): self.result['changed'] = True args = {} args['id'] = iso['id'] - args['projectid'] = self.get_project_id() - args['zoneid'] = self.get_zone_id() + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') if not self.module.check_mode: res = self.cs.deleteIso(**args) return iso diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 35d8851bdef..ec5f8d59ad4 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -100,7 +100,7 @@ class AnsibleCloudStackSecurityGroup(AnsibleCloudStack): if not self.security_group: sg_name = self.module.params.get('name') args = {} - args['projectid'] = self.get_project_id() + args['projectid'] = self.get_project('id') sgs = self.cs.listSecurityGroups(**args) if sgs: for s in sgs['securitygroup']: @@ -117,7 +117,7 @@ class AnsibleCloudStackSecurityGroup(AnsibleCloudStack): args = {} args['name'] = self.module.params.get('name') - args['projectid'] = self.get_project_id() + args['projectid'] = self.get_project('id') args['description'] = self.module.params.get('description') if not self.module.check_mode: @@ -136,7 +136,7 @@ class AnsibleCloudStackSecurityGroup(AnsibleCloudStack): args = {} args['name'] = self.module.params.get('name') - args['projectid'] = self.get_project_id() + args['projectid'] = self.get_project('id') if not self.module.check_mode: res = self.cs.deleteSecurityGroup(**args) diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index c294e062007..cc21ac9022e 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -268,7 +268,7 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): security_group_name = self.module.params.get('security_group') 
args = {} args['securitygroupname'] = security_group_name - args['projectid'] = self.get_project_id() + args['projectid'] = self.get_project('id') sgs = self.cs.listSecurityGroups(**args) if not sgs or 'securitygroup' not in sgs: self.module.fail_json(msg="security group '%s' not found" % security_group_name) @@ -298,7 +298,7 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): args['endport'] = self.get_end_port() args['icmptype'] = self.module.params.get('icmp_type') args['icmpcode'] = self.module.params.get('icmp_code') - args['projectid'] = self.get_project_id() + args['projectid'] = self.get_project('id') args['securitygroupid'] = security_group['id'] rule = None From 0f884ead4088933d2a28940e2fd7b0e7b4926fbb Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 16 May 2015 11:45:48 +0200 Subject: [PATCH 427/720] cloudstack: add catch all exceptions and show a user friendly message Also see GH-493. --- cloud/cloudstack/cs_account.py | 3 +++ cloud/cloudstack/cs_affinitygroup.py | 3 +++ cloud/cloudstack/cs_firewall.py | 3 +++ cloud/cloudstack/cs_instance.py | 3 +++ cloud/cloudstack/cs_instancegroup.py | 3 +++ cloud/cloudstack/cs_iso.py | 3 +++ cloud/cloudstack/cs_portforward.py | 3 +++ cloud/cloudstack/cs_securitygroup.py | 3 +++ cloud/cloudstack/cs_securitygroup_rule.py | 3 +++ cloud/cloudstack/cs_sshkeypair.py | 3 +++ cloud/cloudstack/cs_vmsnapshot.py | 3 +++ 11 files changed, 33 insertions(+) diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index 313c786dee3..399dfa090cc 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -400,6 +400,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + module.exit_json(**result) # import module snippets diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index e4460b93695..2a8de46fe41 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -243,6 +243,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + module.exit_json(**result) # import module snippets diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 012005432fc..8a63710cf4a 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -327,6 +327,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + module.exit_json(**result) # import module snippets diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index d8db6b683c8..1f5cc6ca393 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -825,6 +825,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + module.exit_json(**result) # import module snippets diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index dc216733c63..d62004cc94f 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -220,6 +220,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) + except 
Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + module.exit_json(**result) # import module snippets diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index b0048c75f45..43367530855 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -313,6 +313,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + module.exit_json(**result) # import module snippets diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 127979e3d79..aa0401355cd 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -423,6 +423,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + module.exit_json(**result) # import module snippets diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index ec5f8d59ad4..73a54fef795 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -187,6 +187,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + module.exit_json(**result) # import module snippets diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index cc21ac9022e..ef48b3896ce 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -428,6 +428,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + module.exit_json(**result) # import module snippets diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 995e0b5b81a..0d2e2c822f1 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -245,6 +245,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + module.exit_json(**result) # import module snippets diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index d5d84bd8ea7..b71901a317f 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -314,6 +314,9 @@ def main(): except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + module.exit_json(**result) # import module snippets From de1f642efdb9907931240054432c2f34c91a9b5a Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 16 May 2015 12:45:55 +0200 Subject: [PATCH 428/720] cloudstack: cs_iso: add account and domain --- cloud/cloudstack/cs_iso.py | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 43367530855..0b86b7424e7 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -73,6 +73,16 @@ options: - Register the ISO to be bootable. Only used if C(state) is present. required: false default: true + domain: + description: + - Domain the ISO is related to. 
+ required: false + default: null + account: + description: + - Account the ISO is related to. + required: false + default: null project: description: - Name of the project the ISO to be registered in. @@ -168,6 +178,21 @@ created: returned: success type: string sample: 2015-03-29T14:57:06+0200 +domain: + description: Domain the ISO is related to. + returned: success + type: string + sample: example domain +account: + description: Account the ISO is related to. + returned: success + type: string + sample: example account +project: + description: Project the ISO is related to. + returned: success + type: string + sample: example project ''' try: @@ -191,8 +216,9 @@ class AnsibleCloudStackIso(AnsibleCloudStack): if not iso: args = {} args['zoneid'] = self.get_zone('id') + args['domainid'] = self.get_domain('id') + args['account'] = self.get_account('name') args['projectid'] = self.get_project('id') - args['bootable'] = self.module.params.get('bootable') args['ostypeid'] = self.get_os_type('id') if args['bootable'] and not args['ostypeid']: @@ -221,6 +247,8 @@ class AnsibleCloudStackIso(AnsibleCloudStack): args = {} args['isready'] = self.module.params.get('is_ready') args['isofilter'] = self.module.params.get('iso_filter') + args['domainid'] = self.get_domain('id') + args['account'] = self.get_account('name') args['projectid'] = self.get_project('id') args['zoneid'] = self.get_zone('id') @@ -270,6 +298,12 @@ class AnsibleCloudStackIso(AnsibleCloudStack): self.result['is_ready'] = iso['isready'] if 'created' in iso: self.result['created'] = iso['created'] + if 'project' in iso: + self.result['project'] = iso['project'] + if 'domain' in iso: + self.result['domain'] = iso['domain'] + if 'account' in iso: + self.result['account'] = iso['account'] return self.result @@ -281,6 +315,8 @@ def main(): os_type = dict(default=None), zone = dict(default=None), iso_filter = dict(default='self', choices=[ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]), + domain = dict(default=None), + account = dict(default=None), project = dict(default=None), checksum = dict(default=None), is_ready = dict(choices=BOOLEANS, default=False), From 46cb5627dc47ae681dbf3c6997d452e29f38f793 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 16 May 2015 13:04:14 +0200 Subject: [PATCH 429/720] cloudstack: cs_iso code style and cleanup --- cloud/cloudstack/cs_iso.py | 56 ++++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 26 deletions(-) diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 0b86b7424e7..749acdf594a 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -214,13 +214,21 @@ class AnsibleCloudStackIso(AnsibleCloudStack): def register_iso(self): iso = self.get_iso() if not iso: - args = {} - args['zoneid'] = self.get_zone('id') - args['domainid'] = self.get_domain('id') - args['account'] = self.get_account('name') - args['projectid'] = self.get_project('id') - args['bootable'] = self.module.params.get('bootable') - args['ostypeid'] = self.get_os_type('id') + + args = {} + args['zoneid'] = self.get_zone('id') + args['domainid'] = self.get_domain('id') + args['account'] = self.get_account('name') + args['projectid'] = self.get_project('id') + args['bootable'] = self.module.params.get('bootable') + args['ostypeid'] = self.get_os_type('id') + args['name'] = self.module.params.get('name') + args['displaytext'] = self.module.params.get('name') + args['checksum'] = self.module.params.get('checksum') + 
args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable') + args['isfeatured'] = self.module.params.get('is_featured') + args['ispublic'] = self.module.params.get('is_public') + if args['bootable'] and not args['ostypeid']: self.module.fail_json(msg="OS type 'os_type' is requried if 'bootable=true'.") @@ -228,13 +236,6 @@ class AnsibleCloudStackIso(AnsibleCloudStack): if not args['url']: self.module.fail_json(msg="URL is requried.") - args['name'] = self.module.params.get('name') - args['displaytext'] = self.module.params.get('name') - args['checksum'] = self.module.params.get('checksum') - args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable') - args['isfeatured'] = self.module.params.get('is_featured') - args['ispublic'] = self.module.params.get('is_public') - self.result['changed'] = True if not self.module.check_mode: res = self.cs.registerIso(**args) @@ -244,13 +245,14 @@ class AnsibleCloudStackIso(AnsibleCloudStack): def get_iso(self): if not self.iso: - args = {} - args['isready'] = self.module.params.get('is_ready') - args['isofilter'] = self.module.params.get('iso_filter') - args['domainid'] = self.get_domain('id') - args['account'] = self.get_account('name') - args['projectid'] = self.get_project('id') - args['zoneid'] = self.get_zone('id') + + args = {} + args['isready'] = self.module.params.get('is_ready') + args['isofilter'] = self.module.params.get('iso_filter') + args['domainid'] = self.get_domain('id') + args['account'] = self.get_account('name') + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') # if checksum is set, we only look on that. checksum = self.module.params.get('checksum') @@ -273,10 +275,12 @@ class AnsibleCloudStackIso(AnsibleCloudStack): iso = self.get_iso() if iso: self.result['changed'] = True - args = {} - args['id'] = iso['id'] - args['projectid'] = self.get_project('id') - args['zoneid'] = self.get_zone('id') + + args = {} + args['id'] = iso['id'] + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') + if not self.module.check_mode: res = self.cs.deleteIso(**args) return iso @@ -310,7 +314,7 @@ class AnsibleCloudStackIso(AnsibleCloudStack): def main(): module = AnsibleModule( argument_spec = dict( - name = dict(required=True, default=None), + name = dict(required=True), url = dict(default=None), os_type = dict(default=None), zone = dict(default=None), From 7c199cad252468c90a512bf735336285e893a200 Mon Sep 17 00:00:00 2001 From: Alan Loi Date: Sat, 16 May 2015 21:53:27 +1000 Subject: [PATCH 430/720] Add dynamodb_table module --- cloud/amazon/dynamodb_table | 261 ++++++++++++++++++++++++++++++++++++ 1 file changed, 261 insertions(+) create mode 100644 cloud/amazon/dynamodb_table diff --git a/cloud/amazon/dynamodb_table b/cloud/amazon/dynamodb_table new file mode 100644 index 00000000000..7a200a3b271 --- /dev/null +++ b/cloud/amazon/dynamodb_table @@ -0,0 +1,261 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: dynamodb_table +short_description: Create, update or delete AWS Dynamo DB tables. +description: + - Create or delete AWS Dynamo DB tables. + - Can update the provisioned throughput on existing tables. + - Returns the status of the specified table. +author: Alan Loi (@loia) +requirements: + - "boto >= 2.13.2" +options: + state: + description: + - Create or delete the table + required: false + choices: ['present', 'absent'] + default: 'present' + name: + description: + - Name of the table. + required: true + hash_key_name: + description: + - Name of the hash key. + - Required when state=present. + required: false + hash_key_type: + description: + - Type of the hash key. + required: false + choices: ['STRING', 'NUMBER', 'BINARY'] + default: 'STRING' + range_key_name: + description: + - Name of the range key. + required: false + range_key_type: + description: + - Type of the range key. + required: false + choices: ['STRING', 'NUMBER', 'BINARY'] + default: 'STRING' + read_capacity: + description: + - Read throughput capacity (units) to provision. + required: false + default: 1 + write_capacity: + description: + - Write throughput capacity (units) to provision. + required: false + default: 1 + region: + description: + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. + required: false + aliases: ['aws_region', 'ec2_region'] + +extends_documentation_fragment: aws +""" + +EXAMPLES = ''' +# Create dynamo table with hash and range primary key +- dynamodb_table: + name: my-table + region: us-east-1 + hash_key_name: id + hash_key_type: STRING + range_key_name: create_time + range_key_type: NUMBER + read_capacity: 2 + write_capacity: 2 + +# Update capacity on existing dynamo table +- dynamodb_table: + name: my-table + region: us-east-1 + read_capacity: 10 + write_capacity: 10 + +# Delete dynamo table +- dynamodb_table: + name: my-table + region: us-east-1 + state: absent +''' + +try: + import boto + import boto.dynamodb2 + from boto.dynamodb2.table import Table + from boto.dynamodb2.fields import HashKey, RangeKey + from boto.dynamodb2.types import STRING, NUMBER, BINARY + from boto.exception import BotoServerError, JSONResponseError + +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + + +DYNAMO_TYPE_MAP = { + 'STRING': STRING, + 'NUMBER': NUMBER, + 'BINARY': BINARY +} + + +def create_or_update_dynamo_table(connection, module): + table_name = module.params.get('name') + hash_key_name = module.params.get('hash_key_name') + hash_key_type = module.params.get('hash_key_type') + range_key_name = module.params.get('range_key_name') + range_key_type = module.params.get('range_key_type') + read_capacity = module.params.get('read_capacity') + write_capacity = module.params.get('write_capacity') + + schema = [ + HashKey(hash_key_name, map_dynamo_type(hash_key_type)), + RangeKey(range_key_name, map_dynamo_type(range_key_type)) + ] + throughput = { + 'read': read_capacity, + 'write': write_capacity + } + + result = dict( + region=module.params.get('region'), + table_name=table_name, + hash_key_name=hash_key_name, + hash_key_type=hash_key_type, + range_key_name=range_key_name, + range_key_type=range_key_type, + read_capacity=read_capacity, + write_capacity=write_capacity, + ) + + try: + table = Table(table_name, connection=connection) + + if 
dynamo_table_exists(table): + changed = update_dynamo_table(table, throughput=throughput) + else: + Table.create(table_name, connection=connection, schema=schema, throughput=throughput) + changed = True + + result['table_status'] = table.describe()['Table']['TableStatus'] + result['changed'] = changed + + except BotoServerError: + result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc() + module.fail_json(**result) + else: + module.exit_json(**result) + + +def delete_dynamo_table(connection, module): + table_name = module.params.get('table_name') + + result = dict( + region=module.params.get('region'), + table_name=table_name, + ) + + try: + changed = False + table = Table(table_name, connection=connection) + + if dynamo_table_exists(table): + table.delete() + changed = True + + result['changed'] = changed + + except BotoServerError: + result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc() + module.fail_json(**result) + else: + module.exit_json(**result) + + +def dynamo_table_exists(table): + try: + table.describe() + return True + + except JSONResponseError, e: + if e.message and e.message.startswith('Requested resource not found'): + return False + else: + raise e + + +def update_dynamo_table(table, throughput=None): + table.describe() # populate table details + + # AWS complains if the throughput hasn't changed + if has_throughput_changed(table, throughput): + return table.update(throughput=throughput) + + return False + + +def has_throughput_changed(table, new_throughput): + if not new_throughput: + return False + + return new_throughput['read'] != table.throughput['read'] or \ + new_throughput['write'] != table.throughput['write'] + + +def map_dynamo_type(dynamo_type): + return DYNAMO_TYPE_MAP.get(dynamo_type) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['present', 'absent']), + name=dict(required=True, type='str'), + hash_key_name=dict(required=True, type='str'), + hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), + range_key_name=dict(type='str'), + range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), + read_capacity=dict(default=1, type='int'), + write_capacity=dict(default=1, type='int'), + )) + + module = AnsibleModule(argument_spec=argument_spec) + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + connection = boto.dynamodb2.connect_to_region(region) + + state = module.params.get('state') + if state == 'present': + create_or_update_dynamo_table(connection, module) + elif state == 'absent': + delete_dynamo_table(connection, module) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() From e98127588459c363e2d82d4b52b0dfee99f22db3 Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Mon, 18 May 2015 13:20:22 +0100 Subject: [PATCH 431/720] Added missing default values --- messaging/rabbitmq_exchange.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py index 5f6c83c10e6..aed808ed569 100644 --- a/messaging/rabbitmq_exchange.py +++ b/messaging/rabbitmq_exchange.py @@ -94,7 +94,8 @@ options: arguments: description: - extra arguments for exchange. 
If defined this argument is a key/value dictionary - required: false + required: false + default: {} ''' EXAMPLES = ''' From a35dd872632ca751252a04dfea04a362e86c0164 Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Mon, 18 May 2015 13:22:35 +0100 Subject: [PATCH 432/720] Added missing default --- messaging/rabbitmq_binding.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py index 285670bf10e..83800ad6985 100644 --- a/messaging/rabbitmq_binding.py +++ b/messaging/rabbitmq_binding.py @@ -88,7 +88,8 @@ options: arguments: description: - extra arguments for exchange. If defined this argument is a key/value dictionary - required: false + required: false + default: {} ''' EXAMPLES = ''' From b30f112e571cb753efcadc22850e84ad14cf8aaf Mon Sep 17 00:00:00 2001 From: Manuel Sousa Date: Mon, 18 May 2015 13:27:12 +0100 Subject: [PATCH 433/720] Add missing defaults --- messaging/rabbitmq_queue.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/messaging/rabbitmq_queue.py b/messaging/rabbitmq_queue.py index dc79874b766..258192c1bb7 100644 --- a/messaging/rabbitmq_queue.py +++ b/messaging/rabbitmq_queue.py @@ -82,28 +82,34 @@ options: description: - How long a message can live in queue before it is discarded (milliseconds) required: False + default: forever auto_expires: description: - How long a queue can be unused before it is automatically deleted (milliseconds) required: false + default: forever max_length: description: - How many messages can the queue contain before it starts rejecting required: false + default: no limit dead_letter_exchange: description: - Optional name of an exchange to which messages will be republished if they - are rejected or expire required: false + default: None dead_letter_routing_key: description: - Optional replacement routing key to use when a message is dead-lettered. - Original routing key will be used if unset required: false + default: None arguments: description: - extra arguments for queue. 
If defined this argument is a key/value dictionary - required: false + required: false + default: {} ''' EXAMPLES = ''' From f2b085d01568a26d17ab2d1aaa8ff81220bbb2a4 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 18 May 2015 15:36:09 +0200 Subject: [PATCH 434/720] rabbitmq; update author for new rabbitmq modules --- messaging/rabbitmq_binding.py | 2 +- messaging/rabbitmq_exchange.py | 2 +- messaging/rabbitmq_queue.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py index 285670bf10e..f09b949b243 100644 --- a/messaging/rabbitmq_binding.py +++ b/messaging/rabbitmq_binding.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: rabbitmq_binding -author: Manuel Sousa +author: '"Manuel Sousa (@manuel-sousa)" ' version_added: "2.0" short_description: This module manages rabbitMQ bindings diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py index 5f6c83c10e6..a2e9d2da9db 100644 --- a/messaging/rabbitmq_exchange.py +++ b/messaging/rabbitmq_exchange.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: rabbitmq_exchange -author: Manuel Sousa +author: '"Manuel Sousa (@manuel-sousa)" ' version_added: "2.0" short_description: This module manages rabbitMQ exchanges diff --git a/messaging/rabbitmq_queue.py b/messaging/rabbitmq_queue.py index dc79874b766..bcc08f046a5 100644 --- a/messaging/rabbitmq_queue.py +++ b/messaging/rabbitmq_queue.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: rabbitmq_queue -author: Manuel Sousa +author: '"Manuel Sousa (@manuel-sousa)" ' version_added: "2.0" short_description: This module manages rabbitMQ queues From 576d94e8d4fa8e79216441efd65be62cfb0c603f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 18 May 2015 12:08:15 -0700 Subject: [PATCH 435/720] Update module_utils "import" to the new-style Fixes #505 --- monitoring/zabbix_screen.py | 6 ++++-- network/f5/bigip_facts.py | 6 ++++-- packaging/os/homebrew.py | 7 +++++-- packaging/os/homebrew_cask.py | 7 +++++-- packaging/os/homebrew_tap.py | 6 ++++-- 5 files changed, 22 insertions(+), 10 deletions(-) diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py index 932681617f5..5ada0447571 100644 --- a/monitoring/zabbix_screen.py +++ b/monitoring/zabbix_screen.py @@ -415,5 +415,7 @@ def main(): else: module.exit_json(changed=False) -# <> -main() +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 4522d61bbad..7b78c6d97f7 100755 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -1684,6 +1684,8 @@ def main(): module.exit_json(**result) # include magic from lib/ansible/module_common.py -#<> -main() +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index f6d63b17d3c..b519efa071f 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -834,5 +834,8 @@ def main(): module.exit_json(changed=changed, msg=message) # this is magic, see lib/ansible/module_common.py -#<> -main() +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() + diff --git a/packaging/os/homebrew_cask.py b/packaging/os/homebrew_cask.py index 292da6c7f59..bb5cacabbc7 100644 --- a/packaging/os/homebrew_cask.py +++ b/packaging/os/homebrew_cask.py @@ -509,5 +509,8 @@ def main(): module.exit_json(changed=changed, msg=message) # this is magic, see lib/ansible/module_common.py -#<> 
-main() +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() + diff --git a/packaging/os/homebrew_tap.py b/packaging/os/homebrew_tap.py index 1e0b6b66169..504e77eb062 100644 --- a/packaging/os/homebrew_tap.py +++ b/packaging/os/homebrew_tap.py @@ -211,5 +211,7 @@ def main(): module.exit_json(changed=changed, msg=msg) # this is magic, see lib/ansible/module_common.py -#<> -main() +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() From 2da7792ff693402a0edc6a3f6a5f01f1367b9d59 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 19 May 2015 09:33:04 +0200 Subject: [PATCH 436/720] cloudstack: cs_firewall: add egress support Added functionality to set rules for egress using this module at these are very similar. The only real difference is that egress firewall API uses the networkid. That is why the new arguments `type` for choosing `egress` or `ingress` and `network` was added. For `type=ingress`, which is the default, `ip_address` is required and for `type=egress` the argument `network` is required. --- cloud/cloudstack/cs_firewall.py | 164 +++++++++++++++++++++++++++----- 1 file changed, 142 insertions(+), 22 deletions(-) diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 8a63710cf4a..c9e42be4a4f 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -29,20 +29,35 @@ author: '"René Moser (@resmo)" ' options: ip_address: description: - - Public IP address the rule is assigned to. - required: true + - Public IP address the ingress rule is assigned to. + - Required if C(type=ingress). + required: false + default: null + network: + description: + - Network the egress rule is related to. + - Required if C(type=egress). + required: false + default: null state: description: - State of the firewall rule. required: false default: 'present' choices: [ 'present', 'absent' ] + type: + description: + - Type of the firewall rule. + required: false + default: 'ingress' + choices: [ 'ingress', 'egress' ] protocol: description: - Protocol of the firewall rule. + - C(all) is only available if C(type=egress) required: false default: 'tcp' - choices: [ 'tcp', 'udp', 'icmp' ] + choices: [ 'tcp', 'udp', 'icmp', 'all' ] cidr: description: - CIDR (full notation) to be used for firewall rule. @@ -84,6 +99,11 @@ options: - Name of the project the firewall rule is related to. required: false default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true extends_documentation_fragment: cloudstack ''' @@ -115,15 +135,37 @@ EXAMPLES = ''' end_port: 8888 cidr: 17.0.0.0/8 state: absent + + +# Allow all outbound traffic +- local_action: + module: cs_firewall + network: my_network + type: egress + protocol: all + + +# Allow only HTTP outbound traffic for an IP +- local_action: + module: cs_firewall + network: my_network + type: egress + port: 80 + cidr: 10.101.1.20 ''' RETURN = ''' --- ip_address: - description: IP address of the rule. + description: IP address of the rule if C(type=ingress) returned: success type: string sample: 10.100.212.10 +type: + description: Type of the rule. + returned: success + type: string + sample: ingress cidr: description: CIDR of the rule. 
returned: success @@ -154,6 +196,11 @@ icmp_type: returned: success type: int sample: 1 +network: + description: Name of the network if C(type=egress) + returned: success + type: string + sample: my_network ''' try: @@ -181,32 +228,46 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): def get_firewall_rule(self): if not self.firewall_rule: - cidr = self.module.params.get('cidr') - protocol = self.module.params.get('protocol') - start_port = self.module.params.get('start_port') - end_port = self.get_end_port() - icmp_code = self.module.params.get('icmp_code') - icmp_type = self.module.params.get('icmp_type') + cidr = self.module.params.get('cidr') + protocol = self.module.params.get('protocol') + start_port = self.module.params.get('start_port') + end_port = self.get_end_port() + icmp_code = self.module.params.get('icmp_code') + icmp_type = self.module.params.get('icmp_type') + fw_type = self.module.params.get('type') if protocol in ['tcp', 'udp'] and not (start_port and end_port): - self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol) + self.module.fail_json(msg="missing required argument for protocol '%s': start_port or end_port" % protocol) if protocol == 'icmp' and not icmp_type: - self.module.fail_json(msg="no icmp_type set") + self.module.fail_json(msg="missing required argument for protocol 'icmp': icmp_type") + + if protocol == 'all' and fw_type != 'egress': + self.module.fail_json(msg="protocol 'all' could only be used for type 'egress'" ) args = {} - args['ipaddressid'] = self.get_ip_address('id') args['account'] = self.get_account('name') args['domainid'] = self.get_domain('id') args['projectid'] = self.get_project('id') - firewall_rules = self.cs.listFirewallRules(**args) + if fw_type == 'egress': + args['networkid'] = self.get_network(key='id') + if not args['networkid']: + self.module.fail_json(msg="missing required argument for type egress: network") + firewall_rules = self.cs.listEgressFirewallRules(**args) + else: + args['ipaddressid'] = self.get_ip_address('id') + if not args['ipaddressid']: + self.module.fail_json(msg="missing required argument for type ingress: ip_address") + firewall_rules = self.cs.listFirewallRules(**args) + if firewall_rules and 'firewallrule' in firewall_rules: for rule in firewall_rules['firewallrule']: type_match = self._type_cidr_match(rule, cidr) protocol_match = self._tcp_udp_match(rule, protocol, start_port, end_port) \ - or self._icmp_match(rule, protocol, icmp_code, icmp_type) + or self._icmp_match(rule, protocol, icmp_code, icmp_type) \ + or self._egress_all_match(rule, protocol, fw_type) if type_match and protocol_match: self.firewall_rule = rule @@ -221,6 +282,12 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): and end_port == int(rule['endport']) + def _egress_all_match(self, rule, protocol, fw_type): + return protocol in ['all'] \ + and protocol == rule['protocol'] \ + and fw_type == 'egress' + + def _icmp_match(self, rule, protocol, icmp_code, icmp_type): return protocol == 'icmp' \ and protocol == rule['protocol'] \ @@ -232,6 +299,30 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): return cidr == rule['cidrlist'] + def get_network(self, key=None, network=None): + if not network: + network = self.module.params.get('network') + + if not network: + return None + + args = {} + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + args['zoneid'] = self.get_zone('id') + + networks = self.cs.listNetworks(**args) + 
if not networks: + self.module.fail_json(msg="No networks available") + + for n in networks['network']: + if network in [ n['displaytext'], n['name'], n['id'] ]: + return self._get_by_key(key, n) + break + self.module.fail_json(msg="Network '%s' not found" % network) + + def create_firewall_rule(self): firewall_rule = self.get_firewall_rule() if not firewall_rule: @@ -244,11 +335,22 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): args['endport'] = self.get_end_port() args['icmptype'] = self.module.params.get('icmp_type') args['icmpcode'] = self.module.params.get('icmp_code') - args['ipaddressid'] = self.get_ip_address('id') + fw_type = self.module.params.get('type') if not self.module.check_mode: - firewall_rule = self.cs.createFirewallRule(**args) - + if fw_type == 'egress': + args['networkid'] = self.get_network(key='id') + res = self.cs.createEgressFirewallRule(**args) + else: + args['ipaddressid'] = self.get_ip_address('id') + res = self.cs.createFirewallRule(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + firewall_rule = self._poll_job(res, 'firewallrule') return firewall_rule @@ -256,17 +358,29 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): firewall_rule = self.get_firewall_rule() if firewall_rule: self.result['changed'] = True - args = {} + + args = {} args['id'] = firewall_rule['id'] + fw_type = self.module.params.get('type') if not self.module.check_mode: - res = self.cs.deleteFirewallRule(**args) + if fw_type == 'egress': + res = self.cs.deleteEgressFirewallRule(**args) + else: + res = self.cs.deleteFirewallRule(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self._poll_job(res, 'firewallrule') return firewall_rule def get_result(self, firewall_rule): if firewall_rule: + self.result['type'] = self.module.params.get('type') if 'cidrlist' in firewall_rule: self.result['cidr'] = firewall_rule['cidrlist'] if 'startport' in firewall_rule: @@ -281,15 +395,19 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack): self.result['icmp_code'] = int(firewall_rule['icmpcode']) if 'icmptype' in firewall_rule: self.result['icmp_type'] = int(firewall_rule['icmptype']) + if 'networkid' in firewall_rule: + self.result['network'] = self.get_network(key='displaytext', network=firewall_rule['networkid']) return self.result def main(): module = AnsibleModule( argument_spec = dict( - ip_address = dict(required=True), + ip_address = dict(default=None), + network = dict(default=None), cidr = dict(default='0.0.0.0/0'), - protocol = dict(choices=['tcp', 'udp', 'icmp'], default='tcp'), + protocol = dict(choices=['tcp', 'udp', 'icmp', 'all'], default='tcp'), + type = dict(choices=['ingress', 'egress'], default='ingress'), icmp_type = dict(type='int', default=None), icmp_code = dict(type='int', default=None), start_port = dict(type='int', aliases=['port'], default=None), @@ -298,6 +416,7 @@ def main(): domain = dict(default=None), account = dict(default=None), project = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), @@ -306,6 +425,7 @@ def main(): mutually_exclusive = ( ['icmp_type', 'start_port'], ['icmp_type', 'end_port'], + ['ip_address', 'network'], ), supports_check_mode=True ) From 
525ef4724784a491838711c04545f808c31045c3 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 19 May 2015 10:08:31 +0200 Subject: [PATCH 437/720] cloudstack: fix public and private ports results not int --- cloud/cloudstack/cs_portforward.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index aa0401355cd..123da67e2bc 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -33,7 +33,8 @@ options: required: true vm: description: - - Name of virtual machine which we make the port forwarding rule for. Required if C(state=present). + - Name of virtual machine which we make the port forwarding rule for. + - Required if C(state=present). required: false default: null state: @@ -54,7 +55,8 @@ options: required: true public_end_port: description: - - End public port for this rule. If not specific, equal C(public_port). + - End public port for this rule. + - If not specified equal C(public_port). required: false default: null private_port: @@ -63,7 +65,8 @@ options: required: true private_end_port: description: - - End private port for this rule. If not specific, equal C(private_port) + - End private port for this rule. + - If not specified equal C(private_port). required: false default: null open_firewall: @@ -362,13 +365,13 @@ class AnsibleCloudStackPortforwarding(AnsibleCloudStack): if 'vmguestip' in portforwarding_rule: self.result['vm_guest_ip'] = portforwarding_rule['vmguestip'] if 'publicport' in portforwarding_rule: - self.result['public_port'] = portforwarding_rule['publicport'] + self.result['public_port'] = int(portforwarding_rule['publicport']) if 'publicendport' in portforwarding_rule: - self.result['public_end_port'] = portforwarding_rule['publicendport'] + self.result['public_end_port'] = int(portforwarding_rule['publicendport']) if 'privateport' in portforwarding_rule: - self.result['private_port'] = portforwarding_rule['privateport'] + self.result['private_port'] = int(portforwarding_rule['privateport']) if 'privateendport' in portforwarding_rule: - self.result['private_end_port'] = portforwarding_rule['privateendport'] + self.result['private_end_port'] = int(portforwarding_rule['privateendport']) if 'protocol' in portforwarding_rule: self.result['protocol'] = portforwarding_rule['protocol'] if 'tags' in portforwarding_rule: From d1c896d31edf7bea35c02bd641555626b4caa79b Mon Sep 17 00:00:00 2001 From: Peter Mounce Date: Fri, 1 May 2015 21:17:34 +0100 Subject: [PATCH 438/720] win_scheduled_task module for windows Fledgling module to allow scheduled tasks to be managed. At present, I only need enabled/disabled support. There's lots of scope for more features. --- windows/win_scheduled_task.ps1 | 77 ++++++++++++++++++++++++++++++++++ windows/win_scheduled_task.py | 54 ++++++++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 windows/win_scheduled_task.ps1 create mode 100644 windows/win_scheduled_task.py diff --git a/windows/win_scheduled_task.ps1 b/windows/win_scheduled_task.ps1 new file mode 100644 index 00000000000..2716ed32ea9 --- /dev/null +++ b/windows/win_scheduled_task.ps1 @@ -0,0 +1,77 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2015, Peter Mounce +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +$ErrorActionPreference = "Stop" + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; +$result = New-Object PSObject; +Set-Attr $result "changed" $false; + +if ($params.name) +{ + $package = $params.name +} +else +{ + Fail-Json $result "missing required argument: name" +} +if ($params.state) +{ + $state = $params.state.ToString() + if (($state -ne 'Enabled') -and ($state -ne 'Disabled')) + { + Fail-Json $result "state is '$state'; must be 'Enabled' or 'Disabled'" + } +} +else +{ + $state = "Enabled" +} + + +try +{ + $tasks = Get-ScheduledTask -TaskPath $name + $tasks_needing_changing |? { $_.State -ne $state } + if ($tasks_needing_changing -eq $null) + { + if ($state -eq 'Disabled') + { + $tasks_needing_changing | Disable-ScheduledTask + } + elseif ($state -eq 'Enabled') + { + $tasks_needing_changing | Enable-ScheduledTask + } + Set-Attr $result "tasks_changed" ($tasks_needing_changing | foreach { $_.TaskPath + $_.TaskName }) + $result.changed = $true + } + else + { + Set-Attr $result "tasks_changed" @() + $result.changed = $false + } + Exit-Json $result; +} +catch +{ + Fail-Json $result $_.Exception.Message +} diff --git a/windows/win_scheduled_task.py b/windows/win_scheduled_task.py new file mode 100644 index 00000000000..ac353c14c0a --- /dev/null +++ b/windows/win_scheduled_task.py @@ -0,0 +1,54 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Peter Mounce +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# this is a windows documentation stub. 
actual code lives in the .ps1 +# file of the same name + +DOCUMENTATION = ''' +--- +module: win_scheduled_task +version_added: "1.9" +short_description: Manage scheduled tasks +description: + - Manage scheduled tasks +options: + name: + description: + - Name of the scheduled task + - Supports * as wildcard + required: true + default: null + aliases: [] + state: + description: + - State that the task should become + required: false + choices: + - Disabled + - Enabled + default: Enabled + aliases: [] +author: Peter Mounce +''' + +EXAMPLES = ''' + # Disable the scheduled tasks with "WindowsUpdate" in their name + win_scheduled_task: name="*WindowsUpdate*" state=disabled +''' From 6f1d9fbbccea3c37f3ab672a544903297da311a5 Mon Sep 17 00:00:00 2001 From: Peter Mounce Date: Sat, 2 May 2015 13:56:01 +0100 Subject: [PATCH 439/720] correct variable name --- windows/win_scheduled_task.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_scheduled_task.ps1 b/windows/win_scheduled_task.ps1 index 2716ed32ea9..763bfb53862 100644 --- a/windows/win_scheduled_task.ps1 +++ b/windows/win_scheduled_task.ps1 @@ -27,7 +27,7 @@ Set-Attr $result "changed" $false; if ($params.name) { - $package = $params.name + $name = $params.name } else { From 4fef779f09b0d3b8d7fd7fa893d54c4fc09f2475 Mon Sep 17 00:00:00 2001 From: Peter Mounce Date: Sat, 2 May 2015 17:24:30 +0100 Subject: [PATCH 440/720] caught out by syntax --- windows/win_scheduled_task.ps1 | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/windows/win_scheduled_task.ps1 b/windows/win_scheduled_task.ps1 index 763bfb53862..52b68dd5b6a 100644 --- a/windows/win_scheduled_task.ps1 +++ b/windows/win_scheduled_task.ps1 @@ -50,8 +50,8 @@ else try { $tasks = Get-ScheduledTask -TaskPath $name - $tasks_needing_changing |? { $_.State -ne $state } - if ($tasks_needing_changing -eq $null) + $tasks_needing_changing = $tasks |? 
{ $_.State -ne $state } + if (-not($tasks_needing_changing -eq $null)) { if ($state -eq 'Disabled') { @@ -69,6 +69,7 @@ try Set-Attr $result "tasks_changed" @() $result.changed = $false } + Exit-Json $result; } catch From ede4820562423632610359c07623a158acf0282f Mon Sep 17 00:00:00 2001 From: Peter Mounce Date: Wed, 6 May 2015 21:47:39 +0100 Subject: [PATCH 441/720] version_added -> 2, remove empty aliases --- windows/win_scheduled_task.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/windows/win_scheduled_task.py b/windows/win_scheduled_task.py index ac353c14c0a..e755890b319 100644 --- a/windows/win_scheduled_task.py +++ b/windows/win_scheduled_task.py @@ -24,7 +24,7 @@ DOCUMENTATION = ''' --- module: win_scheduled_task -version_added: "1.9" +version_added: "2.0" short_description: Manage scheduled tasks description: - Manage scheduled tasks @@ -35,7 +35,6 @@ options: - Supports * as wildcard required: true default: null - aliases: [] state: description: - State that the task should become @@ -44,7 +43,6 @@ options: - Disabled - Enabled default: Enabled - aliases: [] author: Peter Mounce ''' From d9211b709b2f6a8bb46118ec3ae95907551c158f Mon Sep 17 00:00:00 2001 From: Peter Mounce Date: Wed, 6 May 2015 21:48:19 +0100 Subject: [PATCH 442/720] no default, remove it --- windows/win_scheduled_task.py | 1 - 1 file changed, 1 deletion(-) diff --git a/windows/win_scheduled_task.py b/windows/win_scheduled_task.py index e755890b319..7c604ecec20 100644 --- a/windows/win_scheduled_task.py +++ b/windows/win_scheduled_task.py @@ -34,7 +34,6 @@ options: - Name of the scheduled task - Supports * as wildcard required: true - default: null state: description: - State that the task should become From a4a3a1343953cf996b57bb6b91a55cdb6678ca12 Mon Sep 17 00:00:00 2001 From: Peter Mounce Date: Tue, 19 May 2015 11:21:23 +0100 Subject: [PATCH 443/720] Code-review Swap state enabled/disabled -> enabled yes/no --- windows/win_scheduled_task.ps1 | 22 +++++++++------------- windows/win_scheduled_task.py | 10 +++++----- 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/windows/win_scheduled_task.ps1 b/windows/win_scheduled_task.ps1 index 52b68dd5b6a..2f802f59cd0 100644 --- a/windows/win_scheduled_task.ps1 +++ b/windows/win_scheduled_task.ps1 @@ -33,33 +33,29 @@ else { Fail-Json $result "missing required argument: name" } -if ($params.state) +if ($params.enabled) { - $state = $params.state.ToString() - if (($state -ne 'Enabled') -and ($state -ne 'Disabled')) - { - Fail-Json $result "state is '$state'; must be 'Enabled' or 'Disabled'" - } + $enabled = $params.enabled | ConvertTo-Bool } else { - $state = "Enabled" + $enabled = $true } - +$target_state = @{$true = "Enabled"; $false="Disabled"}[$enabled] try { $tasks = Get-ScheduledTask -TaskPath $name - $tasks_needing_changing = $tasks |? { $_.State -ne $state } + $tasks_needing_changing = $tasks |? 
{ $_.State -ne $target_state } if (-not($tasks_needing_changing -eq $null)) { - if ($state -eq 'Disabled') + if ($enabled) { - $tasks_needing_changing | Disable-ScheduledTask + $tasks_needing_changing | Enable-ScheduledTask } - elseif ($state -eq 'Enabled') + else { - $tasks_needing_changing | Enable-ScheduledTask + $tasks_needing_changing | Disable-ScheduledTask } Set-Attr $result "tasks_changed" ($tasks_needing_changing | foreach { $_.TaskPath + $_.TaskName }) $result.changed = $true diff --git a/windows/win_scheduled_task.py b/windows/win_scheduled_task.py index 7c604ecec20..2c5867402c5 100644 --- a/windows/win_scheduled_task.py +++ b/windows/win_scheduled_task.py @@ -34,18 +34,18 @@ options: - Name of the scheduled task - Supports * as wildcard required: true - state: + enabled: description: - State that the task should become required: false choices: - - Disabled - - Enabled - default: Enabled + - yes + - no + default: yes author: Peter Mounce ''' EXAMPLES = ''' # Disable the scheduled tasks with "WindowsUpdate" in their name - win_scheduled_task: name="*WindowsUpdate*" state=disabled + win_scheduled_task: name="*WindowsUpdate*" enabled=no ''' From fa2f250f14925b495d068110c66f9cad69b1450a Mon Sep 17 00:00:00 2001 From: Sebastian Kornehl Date: Tue, 19 May 2015 15:05:31 +0200 Subject: [PATCH 444/720] Added eval for pasting tag lists --- monitoring/datadog_event.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index 1d6a98dc9c3..a3ac92a03bb 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -116,7 +116,10 @@ def post_event(module): if module.params['date_happened'] != None: body['date_happened'] = module.params['date_happened'] if module.params['tags'] != None: - body['tags'] = module.params['tags'].split(",") + if module.params['tags'].startswith("[") and module.params['tags'].endswith("]"): + body['tags'] = eval(module.params['tags']) + else: + body['tags'] = module.params['tags'].split(",") if module.params['aggregation_key'] != None: body['aggregation_key'] = module.params['aggregation_key'] if module.params['source_type_name'] != None: From a58e5a0efc0ca955b34aa32eb89bc6710aa75e7f Mon Sep 17 00:00:00 2001 From: tedder Date: Tue, 19 May 2015 10:30:18 -0700 Subject: [PATCH 445/720] fix 404'd url. There's no perfect replacement, but this is okay. 
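(The tag-parsing logic added to monitoring/datadog_event.py above can be exercised on its own. The following is a minimal sketch, not the module code: it assumes the raw string form in which the tags option arrives, and it swaps ast.literal_eval in for the eval call used in the patch, since literal_eval accepts only Python literals and gives the same result for list-style input.)

import ast

def parse_tags(raw):
    # Mirror of the patched logic: a value that looks like a list
    # ("[...]") is parsed into a real list, anything else is treated
    # as a comma-separated string.
    # ast.literal_eval stands in for the patch's eval call here; it only
    # accepts literals, so arbitrary expressions in the string raise an
    # error instead of executing.
    if raw.startswith("[") and raw.endswith("]"):
        return ast.literal_eval(raw)
    return raw.split(",")

print(parse_tags("['env:prod', 'role:web']"))   # ['env:prod', 'role:web']
print(parse_tags("env:prod,role:web"))          # ['env:prod', 'role:web']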
--- monitoring/newrelic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/newrelic_deployment.py b/monitoring/newrelic_deployment.py index 93d55832fd3..de6d73c30ad 100644 --- a/monitoring/newrelic_deployment.py +++ b/monitoring/newrelic_deployment.py @@ -25,7 +25,7 @@ version_added: "1.2" author: Matt Coddington short_description: Notify newrelic about app deployments description: - - Notify newrelic about app deployments (see http://newrelic.github.io/newrelic_api/NewRelicApi/Deployment.html) + - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api) options: token: description: From 4cc8c6b340dc260f32251bb4c2e20cc3af6579eb Mon Sep 17 00:00:00 2001 From: tedder Date: Tue, 19 May 2015 10:31:01 -0700 Subject: [PATCH 446/720] they call it a key, not a token --- monitoring/newrelic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/newrelic_deployment.py b/monitoring/newrelic_deployment.py index de6d73c30ad..326e7a0ca75 100644 --- a/monitoring/newrelic_deployment.py +++ b/monitoring/newrelic_deployment.py @@ -29,7 +29,7 @@ description: options: token: description: - - API token. + - API token, to place in the x-api-key header. required: true app_name: description: From c9e02dd7d83ee165370ba6b9d9ff55556677c35f Mon Sep 17 00:00:00 2001 From: tedder Date: Tue, 19 May 2015 10:59:49 -0700 Subject: [PATCH 447/720] use newer required_one_of syntax; unsure why there's app_name vs appname, so leaving it alone. --- monitoring/newrelic_deployment.py | 1 + 1 file changed, 1 insertion(+) diff --git a/monitoring/newrelic_deployment.py b/monitoring/newrelic_deployment.py index 326e7a0ca75..87658e333c7 100644 --- a/monitoring/newrelic_deployment.py +++ b/monitoring/newrelic_deployment.py @@ -102,6 +102,7 @@ def main(): environment=dict(required=False), validate_certs = dict(default='yes', type='bool'), ), + required_one_of=[['app_name', 'application_id']], supports_check_mode=True ) From 257d8ea2b12edf8f87af4d9b59dcf474a399e0c5 Mon Sep 17 00:00:00 2001 From: Ernst Kuschke Date: Wed, 20 May 2015 16:34:21 +0200 Subject: [PATCH 448/720] Allow any custom chocolatey source This is to allow for a local source (for instance in the form of artifactory) --- windows/win_chocolatey.ps1 | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/windows/win_chocolatey.ps1 b/windows/win_chocolatey.ps1 index 22e0d83e77c..de42434da76 100644 --- a/windows/win_chocolatey.ps1 +++ b/windows/win_chocolatey.ps1 @@ -112,9 +112,9 @@ Else If ($params.source) { $source = $params.source.ToString().ToLower() - If (($source -ne "chocolatey") -and ($source -ne "webpi") -and ($source -ne "windowsfeatures") -and ($source -ne "ruby")) + If (($source -ne "chocolatey") -and ($source -ne "webpi") -and ($source -ne "windowsfeatures") -and ($source -ne "ruby") -and (!$source.startsWith("http://", "CurrentCultureIgnoreCase")) -and (!$source.startsWith("https://", "CurrentCultureIgnoreCase"))) { - Fail-Json $result "source is $source - must be one of chocolatey, ruby, webpi or windowsfeatures." + Fail-Json $result "source is $source - must be one of chocolatey, ruby, webpi, windowsfeatures or a custom source url." 
} } Elseif (!$params.source) @@ -190,6 +190,10 @@ elseif (($source -eq "windowsfeatures") -or ($source -eq "webpi") -or ($source - { $expression += " -source $source" } +elseif(($source -ne $Null) -and ($source -ne "")) +{ + $expression += " -source $source" +} Set-Attr $result "chocolatey command" $expression $op_result = invoke-expression $expression From 9ae04256fed6147b958416faa265e5233db5d03f Mon Sep 17 00:00:00 2001 From: Tim Bielawa Date: Wed, 20 May 2015 14:43:51 -0400 Subject: [PATCH 449/720] Allow nagios module to be licensed as gpl v2 or newer --- monitoring/nagios.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 64716e81c71..5744fb28875 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -9,7 +9,7 @@ # Tim Bielawa # # This software may be freely redistributed under the terms of the GNU -# general public license version 2. +# general public license version 2 or any later version. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . @@ -189,7 +189,7 @@ def main(): services = module.params['services'] cmdfile = module.params['cmdfile'] command = module.params['command'] - + ################################################################## # Required args per action: # downtime = (minutes, service, host) @@ -356,7 +356,7 @@ class Nagios(object): notif_str = "[%s] %s" % (entry_time, cmd) if host is not None: notif_str += ";%s" % host - + if svc is not None: notif_str += ";%s" % svc @@ -796,42 +796,42 @@ class Nagios(object): return return_str_list else: return "Fail: could not write to the command file" - + def silence_nagios(self): """ This command is used to disable notifications for all hosts and services in nagios. - + This is a 'SHUT UP, NAGIOS' command """ cmd = 'DISABLE_NOTIFICATIONS' self._write_command(self._fmt_notif_str(cmd)) - + def unsilence_nagios(self): """ This command is used to enable notifications for all hosts and services in nagios. - + This is a 'OK, NAGIOS, GO'' command """ cmd = 'ENABLE_NOTIFICATIONS' self._write_command(self._fmt_notif_str(cmd)) - + def nagios_cmd(self, cmd): """ This sends an arbitrary command to nagios - + It prepends the submitted time and appends a \n - + You just have to provide the properly formatted command """ - + pre = '[%s]' % int(time.time()) - + post = '\n' cmdstr = '%s %s %s' % (pre, cmd, post) self._write_command(cmdstr) - + def act(self): """ Figure out what you want to do from ansible, and then do the @@ -871,13 +871,13 @@ class Nagios(object): services=self.services) elif self.action == 'silence_nagios': self.silence_nagios() - + elif self.action == 'unsilence_nagios': self.unsilence_nagios() - + elif self.action == 'command': self.nagios_cmd(self.command) - + # wtf? else: self.module.fail_json(msg="unknown action specified: '%s'" % \ From 8893c2d2d0a5bbec16cca221a784a7d679288c4d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 20 May 2015 19:23:45 -0700 Subject: [PATCH 450/720] Fix typo: Need lvreduce instead of lvextend. Fixes #513 --- system/lvol.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/system/lvol.py b/system/lvol.py index dc5cbb66732..7ec5ec5cd64 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -218,8 +218,8 @@ def main(): elif size < this_lv['size']: if not force: module.fail_json(msg="Sorry, no shrinking of %s without force=yes." 
% (this_lv['name'])) - tool = module.get_bin_path("lvextend", required=True) - tool.append("--force") + tool = module.get_bin_path("lvreduce", required=True) + tool = '%s %s' % (tool, '--force') if tool: if module.check_mode: @@ -238,4 +238,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From 5187c7fcd72d4750d5a1c9398ceaf62527272eaf Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 21 May 2015 07:03:49 -0400 Subject: [PATCH 451/720] removed executable bit --- cloud/amazon/cloudtrail.py | 0 cloud/misc/ovirt.py | 0 files/patch.py | 0 network/dnsimple.py | 0 network/f5/bigip_facts.py | 0 network/lldp.py | 0 network/snmp_facts.py | 0 system/alternatives.py | 0 system/cronvar.py | 0 system/svc.py | 0 web_infrastructure/ejabberd_user.py | 0 11 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 cloud/amazon/cloudtrail.py mode change 100755 => 100644 cloud/misc/ovirt.py mode change 100755 => 100644 files/patch.py mode change 100755 => 100644 network/dnsimple.py mode change 100755 => 100644 network/f5/bigip_facts.py mode change 100755 => 100644 network/lldp.py mode change 100755 => 100644 network/snmp_facts.py mode change 100755 => 100644 system/alternatives.py mode change 100755 => 100644 system/cronvar.py mode change 100755 => 100644 system/svc.py mode change 100755 => 100644 web_infrastructure/ejabberd_user.py diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py old mode 100755 new mode 100644 diff --git a/cloud/misc/ovirt.py b/cloud/misc/ovirt.py old mode 100755 new mode 100644 diff --git a/files/patch.py b/files/patch.py old mode 100755 new mode 100644 diff --git a/network/dnsimple.py b/network/dnsimple.py old mode 100755 new mode 100644 diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py old mode 100755 new mode 100644 diff --git a/network/lldp.py b/network/lldp.py old mode 100755 new mode 100644 diff --git a/network/snmp_facts.py b/network/snmp_facts.py old mode 100755 new mode 100644 diff --git a/system/alternatives.py b/system/alternatives.py old mode 100755 new mode 100644 diff --git a/system/cronvar.py b/system/cronvar.py old mode 100755 new mode 100644 diff --git a/system/svc.py b/system/svc.py old mode 100755 new mode 100644 diff --git a/web_infrastructure/ejabberd_user.py b/web_infrastructure/ejabberd_user.py old mode 100755 new mode 100644 From 2d35caf68b16b69c9de337b00322f429a443ad4e Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 22 May 2015 14:33:56 +0200 Subject: [PATCH 452/720] zabbix_group: remove current module in favor of PR #346 --- monitoring/zabbix_group.py | 214 ------------------------------------- 1 file changed, 214 deletions(-) delete mode 100644 monitoring/zabbix_group.py diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py deleted file mode 100644 index f622de5a4f7..00000000000 --- a/monitoring/zabbix_group.py +++ /dev/null @@ -1,214 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, René Moser -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -DOCUMENTATION = ''' ---- -module: zabbix_group -short_description: Add or remove a host group to Zabbix. -description: - - This module uses the Zabbix API to add and remove host groups. -version_added: '1.8' -requirements: - - "python >= 2.6" - - zabbix-api -options: - state: - description: - - Whether the host group should be added or removed. - required: false - default: present - choices: [ 'present', 'absent' ] - host_group: - description: - - Name of the host group to be added or removed. - required: true - default: null - aliases: [ ] - server_url: - description: - - Url of Zabbix server, with protocol (http or https) e.g. - https://monitoring.example.com/zabbix. C(url) is an alias - for C(server_url). If not set environment variable - C(ZABBIX_SERVER_URL) is used. - required: true - default: null - aliases: [ 'url' ] - login_user: - description: - - Zabbix user name. If not set environment variable - C(ZABBIX_LOGIN_USER) is used. - required: true - default: null - login_password: - description: - - Zabbix user password. If not set environment variable - C(ZABBIX_LOGIN_PASSWORD) is used. - required: true -notes: - - The module has been tested with Zabbix Server 2.2. -author: '"René Moser (@resmo)" ' -''' - -EXAMPLES = ''' ---- -# Add a new host group to Zabbix -- zabbix_group: host_group='Linux servers' - server_url=https://monitoring.example.com/zabbix - login_user=ansible - login_password=secure - -# Add a new host group, login data is provided by environment variables: -# ZABBIX_LOGIN_USER, ZABBIX_LOGIN_PASSWORD, ZABBIX_SERVER_URL: -- zabbix_group: host_group=Webservers - -# Remove a host group from Zabbix -- zabbix_group: host_group='Linux servers' - state=absent - server_url=https://monitoring.example.com/zabbix - login_user=ansible - login_password=secure -''' - -try: - from zabbix_api import ZabbixAPI - HAS_ZABBIX_API = True -except ImportError: - HAS_ZABBIX_API = False - - -def create_group(zbx, host_group): - try: - result = zbx.hostgroup.create( - { - 'name': host_group - } - ) - except BaseException as e: - return 1, None, str(e) - return 0, result['groupids'], None - - -def get_group(zbx, host_group): - try: - result = zbx.hostgroup.get( - { - 'filter': - { - 'name': host_group, - } - } - ) - except BaseException as e: - return 1, None, str(e) - - return 0, result[0]['groupid'], None - - -def delete_group(zbx, group_id): - try: - zbx.hostgroup.delete([ group_id ]) - except BaseException as e: - return 1, None, str(e) - return 0, None, None - - -def check_group(zbx, host_group): - try: - result = zbx.hostgroup.exists( - { - 'name': host_group - } - ) - except BaseException as e: - return 1, None, str(e) - return 0, result, None - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent']), - host_group=dict(required=True, default=None), - server_url=dict(default=None, aliases=['url']), - login_user=dict(default=None), - login_password=dict(default=None), - ), - supports_check_mode=True, - ) - - if not HAS_ZABBIX_API: - module.fail_json(msg='Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)') - - try: - login_user = module.params['login_user'] or os.environ['ZABBIX_LOGIN_USER'] - login_password = module.params['login_password'] or os.environ['ZABBIX_LOGIN_PASSWORD'] - server_url = 
module.params['server_url'] or os.environ['ZABBIX_SERVER_URL'] - except KeyError, e: - module.fail_json(msg='Missing login data: %s is not set.' % e.message) - - host_group = module.params['host_group'] - state = module.params['state'] - - try: - zbx = ZabbixAPI(server_url) - zbx.login(login_user, login_password) - except BaseException as e: - module.fail_json(msg='Failed to connect to Zabbix server: %s' % e) - - changed = False - msg = '' - - if state == 'present': - (rc, exists, error) = check_group(zbx, host_group) - if rc != 0: - module.fail_json(msg='Failed to check host group %s existance: %s' % (host_group, error)) - if not exists: - if module.check_mode: - changed = True - else: - (rc, group, error) = create_group(zbx, host_group) - if rc == 0: - changed = True - else: - module.fail_json(msg='Failed to get host group: %s' % error) - - if state == 'absent': - (rc, exists, error) = check_group(zbx, host_group) - if rc != 0: - module.fail_json(msg='Failed to check host group %s existance: %s' % (host_group, error)) - if exists: - if module.check_mode: - changed = True - else: - (rc, group_id, error) = get_group(zbx, host_group) - if rc != 0: - module.fail_json(msg='Failed to get host group: %s' % error) - - (rc, _, error) = delete_group(zbx, group_id) - if rc == 0: - changed = True - else: - module.fail_json(msg='Failed to remove host group: %s' % error) - - module.exit_json(changed=changed) - -from ansible.module_utils.basic import * -main() From 30832ab4d91275412dfe1acccdf846a4d5b860a0 Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sat, 28 Mar 2015 08:44:40 -0700 Subject: [PATCH 453/720] add zabbix_group module --- monitoring/zabbix_group.py | 208 +++++++++++++++++++++++++++++++++++++ 1 file changed, 208 insertions(+) create mode 100644 monitoring/zabbix_group.py diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py new file mode 100644 index 00000000000..447ad927b0b --- /dev/null +++ b/monitoring/zabbix_group.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013-2014, Epic Games, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: zabbix_group +short_description: Zabbix host groups creates/deletes +description: + - Create host groups if they don't exist. + - Delete existing host groups if they exist. +version_added: "1.9" +author: Tony Minfei Ding, Harrison Gu +requirements: + - zabbix-api python module +options: + server_url: + description: + - Url of Zabbix server, with protocol (http or https). + C(url) is an alias for C(server_url). + required: true + default: null + aliases: [ "url" ] + login_user: + description: + - Zabbix user name. + required: true + default: null + login_password: + description: + - Zabbix user password. + required: true + default: null + state: + description: + - Create or delete host group. + - Possible values are: present and absent. 
+ required: false + default: "present" + timeout: + description: + - The timeout of API request(seconds). + default: 10 + host_groups: + description: + - List of host groups to create or delete. + required: true +notes: + - Too many concurrent updates to the same group may cause Zabbix to return errors, see examples for a workaround if needed. +''' + +EXAMPLES = ''' +# Base create host groups example +- name: Create host groups + local_action: + module: zabbix_group + server_url: http://monitor.example.com + login_user: username + login_password: password + state: present + host_groups: + - Example group1 + - Example group2 + +# Limit the Zabbix group creations to one host since Zabbix can return an error when doing concurent updates +- name: Create host groups + local_action: + module: zabbix_group + server_url: http://monitor.example.com + login_user: username + login_password: password + state: present + host_groups: + - Example group1 + - Example group2 + when: inventory_hostname==groups['group_name'][0] +''' + +try: + from zabbix_api import ZabbixAPI, ZabbixAPISubClass + from zabbix_api import Already_Exists + + HAS_ZABBIX_API = True +except ImportError: + HAS_ZABBIX_API = False + + +class HostGroup(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # create host group(s) if not exists + def create_host_group(self, group_names): + try: + group_add_list = [] + for group_name in group_names: + result = self._zapi.hostgroup.exists({'name': group_name}) + if not result: + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.hostgroup.create({'name': group_name}) + group_add_list.append(group_name) + except Already_Exists: + return group_add_list + return group_add_list + except Exception, e: + self._module.fail_json(msg="Failed to create host group(s): %s" % e) + + # delete host group(s) + def delete_host_group(self, group_ids): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.hostgroup.delete(group_ids) + except Exception, e: + self._module.fail_json(msg="Failed to delete host group(s), Exception: %s" % e) + + # get group ids by name + def get_group_ids(self, host_groups): + group_ids = [] + + group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': host_groups}}) + for group in group_list: + group_id = group['groupid'] + group_ids.append(group_id) + return group_ids, group_list + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(required=True, default=None, aliases=['url']), + login_user=dict(required=True), + login_password=dict(required=True), + host_groups=dict(required=True), + state=dict(default="present"), + timeout=dict(default=10) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + host_groups = module.params['host_groups'] + state = module.params['state'] + timeout = module.params['timeout'] + + zbx = None + + # login to zabbix + try: + zbx = ZabbixAPI(server_url, timeout=timeout) + zbx.login(login_user, login_password) + except Exception, e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + hostGroup = HostGroup(module, zbx) + + group_ids = [] + group_list = [] + if host_groups: + group_ids, group_list = 
hostGroup.get_group_ids(host_groups) + + if state == "absent": + # delete host groups + if group_ids: + delete_group_names = [] + hostGroup.delete_host_group(group_ids) + for group in group_list: + delete_group_names.append(group['name']) + module.exit_json(changed=True, + result="Successfully deleted host group(s): %s." % ",".join(delete_group_names)) + else: + module.exit_json(changed=False, result="No host group(s) to delete.") + else: + # create host groups + group_add_list = hostGroup.create_host_group(host_groups) + if len(group_add_list) > 0: + module.exit_json(changed=True, result="Successfully created host group(s): %s" % group_add_list) + else: + module.exit_json(changed=False) + +from ansible.module_utils.basic import * +main() + From 8175bd2f781098af0ff1ed8e54e3f74ed5e63d3e Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sun, 29 Mar 2015 13:42:25 -0700 Subject: [PATCH 454/720] remove superfluous defaults --- monitoring/zabbix_group.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py index 447ad927b0b..add8e5e0da4 100644 --- a/monitoring/zabbix_group.py +++ b/monitoring/zabbix_group.py @@ -36,18 +36,15 @@ options: - Url of Zabbix server, with protocol (http or https). C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null state: description: - Create or delete host group. @@ -148,7 +145,7 @@ class HostGroup(object): def main(): module = AnsibleModule( argument_spec=dict( - server_url=dict(required=True, default=None, aliases=['url']), + server_url=dict(required=True, aliases=['url']), login_user=dict(required=True), login_password=dict(required=True), host_groups=dict(required=True), From f0eb372e995c748bbdf843cf7d48ca65a08a0246 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 22 May 2015 14:42:22 +0200 Subject: [PATCH 455/720] zabbix_group: update authors to new format --- monitoring/zabbix_group.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py index add8e5e0da4..d54009251ff 100644 --- a/monitoring/zabbix_group.py +++ b/monitoring/zabbix_group.py @@ -1,6 +1,6 @@ #!/usr/bin/python # -*- coding: utf-8 -*- - +# # (c) 2013-2014, Epic Games, Inc. # # This file is part of Ansible @@ -17,7 +17,7 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# + DOCUMENTATION = ''' --- @@ -27,9 +27,13 @@ description: - Create host groups if they don't exist. - Delete existing host groups if they exist. version_added: "1.9" -author: Tony Minfei Ding, Harrison Gu +author: + - "(@cove)" + - "Tony Minfei Ding" + - "Harrison Gu (@harrisongu)" requirements: - - zabbix-api python module + - "python >= 2.6" + - zabbix-api options: server_url: description: From 1c210abc692d5d965028d8f39312d9592cb593bb Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 22 May 2015 14:43:14 +0200 Subject: [PATCH 456/720] zabbix_group: update version added to 1.8 Since we intent to replace the current module, we add the version the current module was added. 
--- monitoring/zabbix_group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py index d54009251ff..81fe3be5008 100644 --- a/monitoring/zabbix_group.py +++ b/monitoring/zabbix_group.py @@ -26,7 +26,7 @@ short_description: Zabbix host groups creates/deletes description: - Create host groups if they don't exist. - Delete existing host groups if they exist. -version_added: "1.9" +version_added: "1.8" author: - "(@cove)" - "Tony Minfei Ding" From 3f108732c5101973b97087026e3cd51bc558b66d Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 22 May 2015 14:44:19 +0200 Subject: [PATCH 457/720] zabbix_group: add backword compatibility * add alias host_group for host_groups * add choices for param state --- monitoring/zabbix_group.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py index 81fe3be5008..8fc88eb6bf5 100644 --- a/monitoring/zabbix_group.py +++ b/monitoring/zabbix_group.py @@ -55,6 +55,7 @@ options: - Possible values are: present and absent. required: false default: "present" + choices: [ "present", "absent" ] timeout: description: - The timeout of API request(seconds). @@ -63,6 +64,7 @@ options: description: - List of host groups to create or delete. required: true + aliases: [ "host_group" ] notes: - Too many concurrent updates to the same group may cause Zabbix to return errors, see examples for a workaround if needed. ''' @@ -152,8 +154,8 @@ def main(): server_url=dict(required=True, aliases=['url']), login_user=dict(required=True), login_password=dict(required=True), - host_groups=dict(required=True), - state=dict(default="present"), + host_groups=dict(required=True, aliases=['host_group']), + state=dict(default="present", choices=['present','absent']), timeout=dict(default=10) ), supports_check_mode=True @@ -206,4 +208,3 @@ def main(): from ansible.module_utils.basic import * main() - From c0fb48cd390aeb673e465257888c3311b77dc274 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 22 May 2015 15:05:15 +0200 Subject: [PATCH 458/720] zabbix_group: improve params --- monitoring/zabbix_group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py index 8fc88eb6bf5..8976f9f502f 100644 --- a/monitoring/zabbix_group.py +++ b/monitoring/zabbix_group.py @@ -153,10 +153,10 @@ def main(): argument_spec=dict( server_url=dict(required=True, aliases=['url']), login_user=dict(required=True), - login_password=dict(required=True), + login_password=dict(required=True, no_log=True), host_groups=dict(required=True, aliases=['host_group']), state=dict(default="present", choices=['present','absent']), - timeout=dict(default=10) + timeout=dict(type='int', default=10) ), supports_check_mode=True ) From 4ff9a68a4b5833229785ee63a1a84d4ae9b9fea5 Mon Sep 17 00:00:00 2001 From: QuantumGhost Date: Sat, 23 May 2015 00:11:50 +0800 Subject: [PATCH 459/720] Use expanduser to deal with path in bower module So bower module can correctly handle path containing `~` --- packaging/language/bower.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/bower.py b/packaging/language/bower.py index f0dd58023ba..34284356f6e 100644 --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -154,7 +154,7 @@ def main(): name = module.params['name'] offline = module.params['offline'] - path = module.params['path'] + path = os.path.expanduser(module.params['path']) 
state = module.params['state'] version = module.params['version'] From 7505065daa0a76cad46de423771e96cdc726ba4f Mon Sep 17 00:00:00 2001 From: Christian Thiemann Date: Sun, 24 May 2015 02:05:38 +0200 Subject: [PATCH 460/720] Fix alternatives module in non-English locale The alternatives module parses the output of update-alternatives, but the expected English phrases may not show up if the system locale is not English. Setting LC_ALL=C when invoking update-alternatives fixes this problem. --- system/alternatives.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/alternatives.py b/system/alternatives.py index c298afc2949..06d9bea25f0 100644 --- a/system/alternatives.py +++ b/system/alternatives.py @@ -85,7 +85,7 @@ def main(): # Run `update-alternatives --display ` to find existing alternatives (rc, display_output, _) = module.run_command( - [UPDATE_ALTERNATIVES, '--display', name] + ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name] ) if rc == 0: @@ -106,7 +106,7 @@ def main(): # This is only compatible on Debian-based systems, as the other # alternatives don't have --query available rc, query_output, _ = module.run_command( - [UPDATE_ALTERNATIVES, '--query', name] + ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name] ) if rc == 0: for line in query_output.splitlines(): From 323df26b3e9daa42c78b89e6f9de843a037418e1 Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Mon, 25 May 2015 18:26:45 +0600 Subject: [PATCH 461/720] Added conditionals, umount, forceStop, timeout, etc --- cloud/misc/proxmox.py | 140 +++++++++++++++++++++++++++++++++++------- 1 file changed, 119 insertions(+), 21 deletions(-) diff --git a/cloud/misc/proxmox.py b/cloud/misc/proxmox.py index 16698d3b8ac..dc72f32a975 100644 --- a/cloud/misc/proxmox.py +++ b/cloud/misc/proxmox.py @@ -15,6 +15,7 @@ # along with Ansible. If not, see . import os +import time import logging try: @@ -32,12 +33,67 @@ def content_check(proxmox, node, ostemplate, storage): def node_check(proxmox, node): return [ True for nd in proxmox.nodes.get() if nd['node'] == node ] -def create_instance(proxmox, vmid, node, disk, storage, cpus, memory, swap, **kwargs): +def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs): proxmox_node = proxmox.nodes(node) - logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s:%(name)s: %(message)s') - proxmox_node.openvz.create(vmid=vmid, storage=storage, memory=memory, swap=swap, + taskid = proxmox_node.openvz.create(vmid=vmid, storage=storage, memory=memory, swap=swap, cpus=cpus, disk=disk, **kwargs) + while timeout: + if ( proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' + and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK' ): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' + % proxmox_node.tasks(taskid).log.get()[:1]) + + time.sleep(1) + +def start_instance(module, proxmox, vm, vmid, timeout): + taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.start.post() + while timeout: + if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' + and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for starting VM. 
Last line in task before timeout: %s' + % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + +def stop_instance(module, proxmox, vm, vmid, timeout, force): + if force: + taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.shutdown.post(forceStop=1) + else: + taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.shutdown.post() + while timeout: + if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' + and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' + % proxmox_node.tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + +def umount_instance(module, proxmox, vm, vmid, timeout): + taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.umount.post() + while timeout: + if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' + and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' + % proxmox_node.tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + def main(): module = AnsibleModule( argument_spec = dict( @@ -50,7 +106,7 @@ def main(): password = dict(), hostname = dict(), ostemplate = dict(), - disk = dict(dtype='int', default=3), + disk = dict(type='int', default=3), cpus = dict(type='int', default=1), memory = dict(type='int', default=512), swap = dict(type='int', default=0), @@ -61,8 +117,9 @@ def main(): cpuunits = dict(type='int', default=1000), nameserver = dict(), searchdomain = dict(), + timeout = dict(type='int', default=30), force = dict(type='bool', choices=BOOLEANS, default='no'), - state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restart']), + state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']), ) ) @@ -81,7 +138,9 @@ def main(): memory = module.params['memory'] swap = module.params['swap'] storage = module.params['storage'] + timeout = module.params['timeout'] + logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s:%(name)s: %(message)s') # If password not set get it from PROXMOX_PASSWORD env if not api_password: try: @@ -106,7 +165,7 @@ def main(): module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s" % (module.params['ostemplate'], node, storage)) - create_instance(proxmox, vmid, node, disk, storage, cpus, memory, swap, + create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, password = module.params['password'], hostname = module.params['hostname'], ostemplate = module.params['ostemplate'], @@ -118,7 +177,7 @@ def main(): searchdomain = module.params['searchdomain'], force = int(module.params['force'])) - module.exit_json(changed=True, vmid=vmid) + module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate'])) except Exception, e: module.fail_json(msg="creation of VM %s failed with exception: %s" % ( vmid, e )) @@ -126,12 +185,12 @@ def main(): try: vm = get_instance(proxmox, vmid) if not vm: - module.fail_json(msg='VM with vmid %s not exists in cluster' % vmid) - if [ True for vm in proxmox.node(vm[0]['node']).openvz(vmid).status.current.get()['status'] 
== 'started' ]: - module.exit_json(changed=False, vmid=vmid) + module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) + if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'running': + module.exit_json(changed=False, msg="VM %s is already running" % vmid) - proxmox.nodes(vm[0]['node']).openvz(vmid).status.start.post() - module.exit_json(changed=True, vmid=vmid) + if start_instance(module, proxmox, vm, vmid, timeout): + module.exit_json(changed=True, msg="VM %s started" % vmid) except Exception, e: module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e )) @@ -139,23 +198,62 @@ def main(): try: vm = get_instance(proxmox, vmid) if not vm: - module.fail_json(msg='VM with vmid %s not exists in cluster' % vmid) - if [ True for vm in proxmox.node(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'stopped' ]: - module.exit_json(changed=False, vmid=vmid) + module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) + + if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted': + if module.params['force']: + if umount_instance(module, proxmox, vm, vmid, timeout): + module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) + else: + module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. " + "You can use force option to umount it.") % vmid) + + if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'stopped': + module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid) + + if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']): + module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) + except Exception, e: + module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e )) + + elif state == 'restarted': + try: + vm = get_instance(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) + if ( proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'stopped' + or proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted' ): + module.exit_json(changed=False, msg="VM %s is not running" % vmid) - proxmox.nodes(vm[0]['node']).openvz(vmid).status.shutdown.post() - module.exit_json(changed=True, vmid=vmid) + if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and + start_instance(module, proxmox, vm, vmid, timeout) ): + module.exit_json(changed=True, msg="VM %s is restarted" % vmid) except Exception, e: - module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e )) + module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e )) elif state == 'absent': try: vm = get_instance(proxmox, vmid) if not vm: - module.exit_json(changed=False, vmid=vmid) + module.exit_json(changed=False, msg="VM %s does not exist" % vmid) + + if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'running': + module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid) + + if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted': + module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." 
% vmid) + + taskid = proxmox.nodes(vm[0]['node']).openvz.delete(vmid) + while timeout: + if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' + and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): + module.exit_json(changed=True, msg="VM %s removed" % vmid) + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' + % proxmox_node.tasks(taskid).log.get()[:1]) - proxmox.nodes(vm[0]['node']).openvz.delete(vmid) - module.exit_json(changed=True, vmid=vmid) + time.sleep(1) except Exception, e: module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e )) From cb09a269007ca99d2991f1bcbeae6b08054056bb Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Mon, 25 May 2015 19:10:34 +0600 Subject: [PATCH 462/720] Added documentation --- cloud/misc/proxmox.py | 147 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) diff --git a/cloud/misc/proxmox.py b/cloud/misc/proxmox.py index dc72f32a975..c0967f5a28b 100644 --- a/cloud/misc/proxmox.py +++ b/cloud/misc/proxmox.py @@ -14,6 +14,153 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +DOCUMENTATION = ''' +--- +module: proxmox +short_description: management of instances in Proxmox VE cluster +description: + - allows you to create/delete/stop instances in Proxmox VE cluster +options: + api_host: + description: + - the host of the Proxmox VE cluster + default: null + required: true + api_user: + description: + - the user to authenticate with + default: null + required: true + api_password: + description: + - the password to authenticate with + - you can use PROXMOX_PASSWORD environment variable + default: null + required: false + vmid: + description: + - the instance id + default: null + required: true + https_verify_ssl: + description: + - enable / disable https certificate verification + default: false + required: false + type: boolean + node: + description: + - Proxmox VE node, when new VM will be created + - required only for state="present" + - for another states will be autodiscovered + default: null + required: false + password: + description: + - the instance root password + - required only for state="present" + default: null + required: false + hostname: + description: + - the instance hostname + - required only for state="present" + default: null + required: false + ostemplate: + description: + - the template for VM creating + - required only for state="present" + default: null + required: false + disk: + description: + - hard disk size in GB for instance + default: 3 + required: false + cpus: + description: + - numbers of allocated cpus for instance + default: 1 + required: false + memory: + description: + - memory size in MB for instance + default: 512 + required: false + swap: + description: + - swap memory size in MB for instance + default: 0 + required: false + netif: + description: + - specifies network interfaces for the container + default: null + required: false + type: string + ip_address: + description: + - specifies the address the container will be assigned + default: null + required: false + type: string + onboot: + description: + - specifies whether a VM will be started during system bootup + default: false + required: false + type: boolean + storage: + description: + - target storage + default: 'local' + required: false + type: string + cpuunits: + description: + - CPU weight 
for a VM + default: 1000 + required: false + type: integer + nameserver: + description: + - sets DNS server IP address for a container + default: null + required: false + type: string + searchdomain: + description: + - sets DNS search domain for a container + default: null + required: false + type: string + timeout: + description: + - timeout for operations + default: 30 + required: false + type: integer + force: + description: + - forcing operations + - can be used only with states "present", "stopped", "restarted" + - with state="present" force option allow to overwrite existing container + - with states "stopped", "restarted" allow to force stop instance + default: false + required: false + type: boolean + state: + description: + - Indicate desired state of the instance + choices: ['present', 'started', 'absent', 'stopped', 'restarted'] + default: present +notes: + - Requires proxmoxer and requests modules on host. This modules can be installed with pip. +requirements: [ "proxmoxer", "requests" ] +author: Sergei Antipov +''' + import os import time import logging From 69d3474bcc7381c2e163419257b7bb8be41de40d Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Mon, 25 May 2015 19:11:08 +0600 Subject: [PATCH 463/720] Deleted debugging --- cloud/misc/proxmox.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cloud/misc/proxmox.py b/cloud/misc/proxmox.py index c0967f5a28b..ef72eff6138 100644 --- a/cloud/misc/proxmox.py +++ b/cloud/misc/proxmox.py @@ -163,7 +163,6 @@ author: Sergei Antipov import os import time -import logging try: from proxmoxer import ProxmoxAPI @@ -287,7 +286,6 @@ def main(): storage = module.params['storage'] timeout = module.params['timeout'] - logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s:%(name)s: %(message)s') # If password not set get it from PROXMOX_PASSWORD env if not api_password: try: From 8dfa63d1d8be333dd107f4f90be2c337b4909432 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 25 May 2015 09:35:11 -0700 Subject: [PATCH 464/720] Add version_added dodmentation --- cloud/misc/proxmox.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/misc/proxmox.py b/cloud/misc/proxmox.py index ef72eff6138..9742f95ba50 100644 --- a/cloud/misc/proxmox.py +++ b/cloud/misc/proxmox.py @@ -20,6 +20,7 @@ module: proxmox short_description: management of instances in Proxmox VE cluster description: - allows you to create/delete/stop instances in Proxmox VE cluster +version_added: "2.0" options: api_host: description: From a1a1c23205f2bd719ca3e2f88bb4598aea478d0a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 25 May 2015 19:29:55 -0700 Subject: [PATCH 465/720] Bandaid dnf module to temporarily fix some major problems - Backport config file handling from the DNF module rewrite #527 (Current config handling does not work with dnf and leads to tracebacks when run as an unprivileged user). - Make a mandatory requirement on yum-utils (for /usr/bin/repoquery) because none of the fallback code works for dnf (it's unported yum API code). Both of these issues will be fixed better in the dnf rewrite when it is feature complete. 
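(In isolation, the dnf setup sequence this bandaid backports looks roughly like the sketch below. It is an illustration assuming the dnf Python bindings of that era, not the module itself; the actual change follows in the diff.)

import os
import dnf

def make_base(conf_file=None):
    # Same sequence as the patched dnf_base(): create a Base object,
    # optionally point it at an alternate config file, then load the
    # repository definitions and metadata so package queries can run.
    base = dnf.Base()
    base.conf.debuglevel = 0
    if conf_file and os.path.exists(conf_file):
        base.conf.config_file_path = conf_file
        base.conf.read()
    base.read_all_repos()
    base.fill_sack()
    return base

(The configuration is read before read_all_repos() so a user-supplied conf_file takes effect, and fill_sack() is what actually loads repository metadata for later queries.)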
Fixes #471 --- packaging/os/dnf.py | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/packaging/os/dnf.py b/packaging/os/dnf.py index c76f39b1dd6..e40c268f742 100644 --- a/packaging/os/dnf.py +++ b/packaging/os/dnf.py @@ -92,7 +92,9 @@ options: notes: [] # informational: requirements for nodes -requirements: [ dnf ] +requirements: + - dnf + - yum-utils (for repoquery) author: '"Cristian van Ee (@DJMuggs)" ' ''' @@ -137,18 +139,12 @@ def log(msg): def dnf_base(conf_file=None, cachedir=False): my = dnf.Base() - my.logging.verbose_level=0 - my.logging.verbose_level=0 + my.conf.debuglevel=0 if conf_file and os.path.exists(conf_file): - my.config = conf_file - if cachedir or os.geteuid() != 0: - if cachedir or os.geteuid() != 0: - if hasattr(my, 'setCacheDir'): - my.setCacheDir() - else: - cachedir = cachedir.dnf.Conf() - my.repos.setCacheDir(cachedir) - my.conf.cache = 0 + my.conf.config_file_path = conf_file + my.conf.read() + my.read_all_repos() + my.fill_sack() return my @@ -157,7 +153,7 @@ def install_dnf_utils(module): if not module.check_mode: dnf_path = module.get_bin_path('dnf') if dnf_path: - rc, so, se = module.run_command('%s -y install dnf-plugins-core' % dnf_path) + rc, so, se = module.run_command('%s -y install yum-utils' % dnf_path) if rc == 0: this_path = module.get_bin_path('repoquery') global repoquery @@ -812,9 +808,9 @@ def main(): if params['install_repoquery'] and not repoquery and not module.check_mode: install_dnf_utils(module) + if not repoquery: + module.fail_json(msg="repoquery is required to use this module at this time. Please install the yum-utils package.") if params['list']: - if not repoquery: - module.fail_json(msg="repoquery is required to use list= with this module. Please install the dnf-utils package.") results = dict(results=list_stuff(module, params['conf_file'], params['list'])) module.exit_json(**results) From ed6283d4224ab96970a0a8c44e87edccb909de2d Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Tue, 26 May 2015 11:22:45 +0600 Subject: [PATCH 466/720] Added examples --- cloud/misc/proxmox.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/cloud/misc/proxmox.py b/cloud/misc/proxmox.py index 9742f95ba50..a1f21073b0e 100644 --- a/cloud/misc/proxmox.py +++ b/cloud/misc/proxmox.py @@ -162,6 +162,32 @@ requirements: [ "proxmoxer", "requests" ] author: Sergei Antipov ''' +EXAMPLES = ''' +# Create new container with minimal options +- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' + +# Create new container with minimal options with force(it will rewrite existing container) +- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' force=yes + +# Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before) +- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' + +# Start container +- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=started + +# Stop container +- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped + +# Stop container with force +- proxmox: vmid=100 api_user='root@pam' 
api_password='1q2w3e' api_host='node1' force=yes state=stopped + +# Restart container(stopped or mounted container you can't restart) +- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped + +# Remove container +- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=absent +''' + import os import time From 148bd52d8b1f9650dd65ecd9c62be4e7e697746d Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 26 May 2015 13:52:47 +0200 Subject: [PATCH 467/720] proxmox: update module doc --- cloud/misc/proxmox.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/cloud/misc/proxmox.py b/cloud/misc/proxmox.py index 9742f95ba50..74c49afef92 100644 --- a/cloud/misc/proxmox.py +++ b/cloud/misc/proxmox.py @@ -25,12 +25,10 @@ options: api_host: description: - the host of the Proxmox VE cluster - default: null required: true api_user: description: - the user to authenticate with - default: null required: true api_password: description: @@ -52,26 +50,26 @@ options: node: description: - Proxmox VE node, when new VM will be created - - required only for state="present" + - required only for C(state=present) - for another states will be autodiscovered default: null required: false password: description: - the instance root password - - required only for state="present" + - required only for C(state=present) default: null required: false hostname: description: - the instance hostname - - required only for state="present" + - required only for C(state=present) default: null required: false ostemplate: description: - the template for VM creating - - required only for state="present" + - required only for C(state=present) default: null required: false disk: @@ -145,9 +143,9 @@ options: force: description: - forcing operations - - can be used only with states "present", "stopped", "restarted" - - with state="present" force option allow to overwrite existing container - - with states "stopped", "restarted" allow to force stop instance + - can be used only with states C(present), C(stopped), C(restarted) + - with C(state=present) force option allow to overwrite existing container + - with states C(stopped) , C(restarted) allow to force stop instance default: false required: false type: boolean @@ -159,7 +157,7 @@ options: notes: - Requires proxmoxer and requests modules on host. This modules can be installed with pip. 
requirements: [ "proxmoxer", "requests" ] -author: Sergei Antipov +author: "Sergei Antipov @UnderGreen" ''' import os From e72f581a168718cda5f9f92e151f1a861f135426 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 26 May 2015 13:53:28 +0200 Subject: [PATCH 468/720] proxmox: prevent to log passwords --- cloud/misc/proxmox.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/misc/proxmox.py b/cloud/misc/proxmox.py index 74c49afef92..7978fca53ad 100644 --- a/cloud/misc/proxmox.py +++ b/cloud/misc/proxmox.py @@ -244,11 +244,11 @@ def main(): argument_spec = dict( api_host = dict(required=True), api_user = dict(required=True), - api_password = dict(), + api_password = dict(no_log=True), vmid = dict(required=True), https_verify_ssl = dict(type='bool', choices=BOOLEANS, default='no'), node = dict(), - password = dict(), + password = dict(no_log=True), hostname = dict(), ostemplate = dict(), disk = dict(type='int', default=3), From c9dfeb07c13e3814706280f0277c537f148dd856 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 26 May 2015 15:28:50 +0200 Subject: [PATCH 469/720] zabbix: remove redundant import of module_utils --- monitoring/zabbix_host.py | 1 - monitoring/zabbix_hostmacro.py | 1 - monitoring/zabbix_screen.py | 2 -- 3 files changed, 4 deletions(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 2d6ce04b830..7848ce78b53 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -118,7 +118,6 @@ EXAMPLES = ''' import logging import copy -from ansible.module_utils.basic import * try: from zabbix_api import ZabbixAPI, ZabbixAPISubClass diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py index a8649454025..873b33900f7 100644 --- a/monitoring/zabbix_hostmacro.py +++ b/monitoring/zabbix_hostmacro.py @@ -84,7 +84,6 @@ EXAMPLES = ''' import logging import copy -from ansible.module_utils.basic import * try: from zabbix_api import ZabbixAPI, ZabbixAPISubClass diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py index 5ada0447571..46029f9196a 100644 --- a/monitoring/zabbix_screen.py +++ b/monitoring/zabbix_screen.py @@ -127,8 +127,6 @@ EXAMPLES = ''' when: inventory_hostname==groups['group_name'][0] ''' -from ansible.module_utils.basic import * - try: from zabbix_api import ZabbixAPI, ZabbixAPISubClass from zabbix_api import ZabbixAPIException From 9d07fb9618c601c4f709950189914873e2a289d7 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 26 May 2015 15:33:50 +0200 Subject: [PATCH 470/720] zabbix: improve arguments and prevent to log password --- monitoring/zabbix_host.py | 8 ++++---- monitoring/zabbix_hostmacro.py | 6 +++--- monitoring/zabbix_screen.py | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 7848ce78b53..5b1ee788d0b 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -338,13 +338,13 @@ def main(): argument_spec=dict( server_url=dict(required=True, aliases=['url']), login_user=dict(required=True), - login_password=dict(required=True), + login_password=dict(required=True, no_log=True), host_name=dict(required=True), host_groups=dict(required=False), link_templates=dict(required=False), - status=dict(default="enabled"), - state=dict(default="present"), - timeout=dict(default=10), + status=dict(default="enabled", choices=['enabled', 'disabled']), + state=dict(default="present", choices=['present', 'absent']), + timeout=dict(type='int', default=10), 
interfaces=dict(required=False) ), supports_check_mode=True diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py index 873b33900f7..fc77df477e7 100644 --- a/monitoring/zabbix_hostmacro.py +++ b/monitoring/zabbix_hostmacro.py @@ -170,12 +170,12 @@ def main(): argument_spec=dict( server_url=dict(required=True, aliases=['url']), login_user=dict(required=True), - login_password=dict(required=True), + login_password=dict(required=True, no_log=True), host_name=dict(required=True), macro_name=dict(required=True), macro_value=dict(required=True), - state=dict(default="present"), - timeout=dict(default=10) + state=dict(default="present", choices=['present', 'absent']), + timeout=dict(type='int', default=10) ), supports_check_mode=True ) diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py index 46029f9196a..408ab00c9ae 100644 --- a/monitoring/zabbix_screen.py +++ b/monitoring/zabbix_screen.py @@ -317,9 +317,9 @@ def main(): argument_spec=dict( server_url=dict(required=True, aliases=['url']), login_user=dict(required=True), - login_password=dict(required=True), - timeout=dict(default=10), - screens=dict(required=True) + login_password=dict(required=True, no_log=True), + timeout=dict(type='int', default=10), + screens=dict(type='dict', required=True) ), supports_check_mode=True ) From 1eb6c37594077398b77b5c251aca24e39c2bd8ee Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 26 May 2015 15:34:35 +0200 Subject: [PATCH 471/720] zabbix: improve module doc --- monitoring/zabbix_host.py | 12 ++++++++---- monitoring/zabbix_hostmacro.py | 7 +++++-- monitoring/zabbix_screen.py | 8 ++++---- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 5b1ee788d0b..37130a21ecf 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -63,24 +63,28 @@ options: default: None status: description: - - 'Monitoring status of the host. Possible values are: "enabled" and "disabled".' + - 'Monitoring status of the host. required: false + choices: ['enabled', 'disabled'] default: "enabled" state: description: - - 'Possible values are: "present" and "absent". If the host already exists, and the state is "present", it will just to update the host is the associated data is different. "absent" will remove a host if it exists.' + - State of the host. + - On C(present), it will create if host does not exist or update the host if the associated data is different. + - On C(absent) will remove a host if it exists. required: false + choices: ['present', 'absent'] default: "present" timeout: description: - - The timeout of API request(seconds). + - The timeout of API request (seconds). default: 10 interfaces: description: - List of interfaces to be created for the host (see example below). - 'Available values are: dns, ip, main, port, type and useip.' - Please review the interface documentation for more information on the supported properties - - https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface + - 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface' required: false default: [] ''' diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py index fc77df477e7..e8d65370760 100644 --- a/monitoring/zabbix_hostmacro.py +++ b/monitoring/zabbix_hostmacro.py @@ -60,12 +60,15 @@ options: required: true state: description: - - 'Possible values are: "present" and "absent". 
If the macro already exists, and the state is "present", it will just to update the macro if needed.' + - State of the macro. + - On C(present), it will create if macro does not exist or update the macro if the associated data is different. + - On C(absent) will remove a macro if it exists. required: false + choices: ['present', 'absent'] default: "present" timeout: description: - - The timeout of API request(seconds). + - The timeout of API request (seconds). default: 10 ''' diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py index 408ab00c9ae..12ef6c69b6f 100644 --- a/monitoring/zabbix_screen.py +++ b/monitoring/zabbix_screen.py @@ -50,15 +50,15 @@ options: required: true timeout: description: - - The timeout of API request(seconds). + - The timeout of API request (seconds). default: 10 zabbix_screens: description: - List of screens to be created/updated/deleted(see example). - If the screen(s) already been added, the screen(s) name won't be updated. - - When creating or updating screen(s), the screen_name, host_group are required. - - When deleting screen(s), the screen_name is required. - - 'The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated as needed.' + - When creating or updating screen(s), C(screen_name), C(host_group) are required. + - When deleting screen(s), the C(screen_name) is required. + - 'The available states are: C(present) (default) and C(absent). If the screen(s) already exists, and the state is not C(absent), the screen(s) will just be updated as needed.' required: true notes: - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed. From d04e6b1d070f390008e82d5b23a5d6d68a37e102 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 26 May 2015 15:41:37 +0200 Subject: [PATCH 472/720] zabbix_maintenance: prevent to log password --- monitoring/zabbix_maintenance.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/monitoring/zabbix_maintenance.py b/monitoring/zabbix_maintenance.py index 02938234fbf..25d7c8df95e 100644 --- a/monitoring/zabbix_maintenance.py +++ b/monitoring/zabbix_maintenance.py @@ -48,12 +48,10 @@ options: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null host_names: description: - Hosts to manage maintenance window for. @@ -83,7 +81,6 @@ options: description: - Unique name of maintenance window. required: true - default: null desc: description: - Short description of maintenance window. 
@@ -273,9 +270,9 @@ def main(): host_names=dict(type='list', required=False, default=None, aliases=['host_name']), minutes=dict(type='int', required=False, default=10), host_groups=dict(type='list', required=False, default=None, aliases=['host_group']), - login_user=dict(required=True, default=None), - login_password=dict(required=True, default=None), - name=dict(required=True, default=None), + login_user=dict(required=True), + login_password=dict(required=True, no_log=True), + name=dict(required=True), desc=dict(required=False, default="Created by Ansible"), collect_data=dict(type='bool', required=False, default=True), ), From 8b62a45466f4cb90e357a5495dbaa4b2f9b27486 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 26 May 2015 15:40:20 -0700 Subject: [PATCH 473/720] Fix docs - don't need quote anymore --- monitoring/zabbix_host.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 37130a21ecf..772e92cb32d 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -63,7 +63,7 @@ options: default: None status: description: - - 'Monitoring status of the host. + - Monitoring status of the host. required: false choices: ['enabled', 'disabled'] default: "enabled" From 3f9a62c1361dc5e707cf4024d6470e7d0cd285ef Mon Sep 17 00:00:00 2001 From: Enric Lluelles Date: Wed, 27 May 2015 09:55:42 +0200 Subject: [PATCH 474/720] Add slash to valid characters for packages To allow it to download packes from taps, or external commands like caskroom/cask/brew-cask --- packaging/os/homebrew.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index b519efa071f..0b37521820d 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -116,6 +116,7 @@ class Homebrew(object): VALID_PACKAGE_CHARS = r''' \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) . # dots + / # slash (for taps) \+ # plusses - # dashes ''' From 7f74a98451674d1d735ab92295ca64ebd340bf65 Mon Sep 17 00:00:00 2001 From: Dagobert Michelsen Date: Wed, 27 May 2015 13:35:34 +0200 Subject: [PATCH 475/720] svr4pkg: assume command worked only on known-to-be-good returncodes --- packaging/os/svr4pkg.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/packaging/os/svr4pkg.py b/packaging/os/svr4pkg.py index 51cda437e7f..5d8bac17eaa 100644 --- a/packaging/os/svr4pkg.py +++ b/packaging/os/svr4pkg.py @@ -209,15 +209,25 @@ def main(): (rc, out, err) = package_uninstall(module, name, src, category) out = out[:75] - # Success, Warning, Interruption, Reboot all, Reboot this return codes + # Returncodes as per pkgadd(1m) + # 0 Successful completion + # 1 Fatal error. + # 2 Warning. + # 3 Interruption. + # 4 Administration. + # 5 Administration. Interaction is required. Do not use pkgadd -n. + # 10 Reboot after installation of all packages. + # 20 Reboot after installation of this package. 
+ # 99 (observed) pkgadd: ERROR: could not process datastream from if rc in (0, 2, 3, 10, 20): result['changed'] = True # no install nor uninstall, or failed else: result['changed'] = False - # Fatal error, Administration, Administration Interaction return codes - if rc in (1, 4 , 5): + # Only return failed=False when the returncode is known to be good as there may be more + # undocumented failure return codes + if rc not in (0, 2, 10, 20): result['failed'] = True else: result['failed'] = False From 141dda9978a197801a503347c8bd611cb368dda8 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 27 May 2015 20:54:26 +0200 Subject: [PATCH 476/720] firewalld: remove BabyJSON See https://github.com/ansible/ansible-modules-extras/issues/430 --- system/firewalld.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/system/firewalld.py b/system/firewalld.py index 77cfc4b6bb8..e16e4e4a9dd 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -67,8 +67,8 @@ options: required: false default: 0 notes: - - Not tested on any debian based system. -requirements: [ firewalld >= 0.2.11 ] + - Not tested on any Debian based system. +requirements: [ 'firewalld >= 0.2.11' ] author: '"Adam Miller (@maxamillion)" ' ''' @@ -82,7 +82,6 @@ EXAMPLES = ''' import os import re -import sys try: import firewall.config @@ -90,14 +89,9 @@ try: from firewall.client import FirewallClient fw = FirewallClient() - if not fw.connected: - raise Exception('failed to connect to the firewalld daemon') + HAS_FIREWALLD = True except ImportError: - print "failed=True msg='firewalld required for this module'" - sys.exit(1) -except Exception, e: - print "failed=True msg='%s'" % str(e) - sys.exit(1) + HAS_FIREWALLD = False ################ # port handling @@ -223,6 +217,9 @@ def main(): supports_check_mode=True ) + if not HAS_FIREWALLD: + module.fail_json(msg='firewalld required for this module') + ## Pre-run version checking if FW_VERSION < "0.2.11": module.fail_json(msg='unsupported version of firewalld, requires >= 2.0.11') @@ -400,6 +397,4 @@ def main(): ################################################# # import module snippets from ansible.module_utils.basic import * - main() - From c223716bc7ccf2d0ac7995b36f76cca8ccd5bfda Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 28 May 2015 10:04:58 -0400 Subject: [PATCH 477/720] fixed version on consul modules --- clustering/consul.py | 2 +- clustering/consul_acl.py | 2 +- clustering/consul_kv.py | 2 +- clustering/consul_session.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/clustering/consul.py b/clustering/consul.py index 9195c0ff591..0baaae83b84 100644 --- a/clustering/consul.py +++ b/clustering/consul.py @@ -41,7 +41,7 @@ requirements: - "python >= 2.6" - python-consul - requests -version_added: "1.9" +version_added: "2.0" author: '"Steve Gargan (@sgargan)" ' options: state: diff --git a/clustering/consul_acl.py b/clustering/consul_acl.py index 31cb01d1404..b832281bb80 100644 --- a/clustering/consul_acl.py +++ b/clustering/consul_acl.py @@ -29,7 +29,7 @@ requirements: - python-consul - pyhcl - requests -version_added: "1.9" +version_added: "2.0" author: '"Steve Gargan (@sgargan)" ' options: mgmt_token: diff --git a/clustering/consul_kv.py b/clustering/consul_kv.py index 13437d95cce..69a66c746ab 100644 --- a/clustering/consul_kv.py +++ b/clustering/consul_kv.py @@ -31,7 +31,7 @@ requirements: - "python >= 2.6" - python-consul - requests -version_added: "1.9" +version_added: "2.0" author: '"Steve Gargan (@sgargan)" ' 
options: state: diff --git a/clustering/consul_session.py b/clustering/consul_session.py index 8e7f763a21d..d57c2b69db8 100644 --- a/clustering/consul_session.py +++ b/clustering/consul_session.py @@ -29,7 +29,7 @@ requirements: - "python >= 2.6" - python-consul - requests -version_added: "1.9" +version_added: "2.0" author: '"Steve Gargan (@sgargan)" ' options: state: From f6dee55ee81d7a17e5efc94f5399183be555181f Mon Sep 17 00:00:00 2001 From: fdupoux Date: Thu, 28 May 2015 19:46:53 +0100 Subject: [PATCH 478/720] Removed conditional assignment of yesopt to make it work with python-2.4 (to pass the Travis-CI test) --- system/lvol.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index 49bd713e16d..14bab6926e4 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -125,7 +125,10 @@ def main(): if version_found == None: module.fail_json(msg="Failed to get LVM version number") version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option - yesopt = "--yes" if version_found >= version_yesopt else "" + if version_found >= version_yesopt: + yesopt = "--yes" + else: + yesopt = "" vg = module.params['vg'] lv = module.params['lv'] From 5b401cfcc30cb84dcf19a4c05b5a0791303d8378 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 28 May 2015 16:23:27 -0400 Subject: [PATCH 479/720] Add module to run puppet There is a growing pattern for using ansible to orchestrate runs of existing puppet code. For instance, the OpenStack Infrastructure team started using ansible for this very reason. It also turns out that successfully running puppet and interpreting success or failure is harder than you'd expect, thus warranting a module and not just a shell command. This is ported in from http://git.openstack.org/cgit/openstack-infra/ansible-puppet --- system/puppet.py | 186 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 system/puppet.py diff --git a/system/puppet.py b/system/puppet.py new file mode 100644 index 00000000000..c53c88f595d --- /dev/null +++ b/system/puppet.py @@ -0,0 +1,186 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +import json +import os +import pipes + +DOCUMENTATION = ''' +--- +module: puppet +short_description: Runs puppet +description: + - Runs I(puppet) agent or apply in a reliable manner +version_added: "2.0" +options: + timeout: + description: + - How long to wait for I(puppet) to finish. + required: false + default: 30m + puppetmaster: + description: + - The hostname of the puppetmaster to contact. Must have this or manifest + required: false + default: None + manifest: + description: + - Path to the manifest file to run puppet apply on. Must have this or puppetmaster + required: false + default: None + show_diff: + description: + - Should puppet return diffs of changes applied.
Defaults to off to avoid leaking secret changes by default. + required: false + default: no + choices: [ "yes", "no" ] + facts: + description: + - A dict of values to pass in as persistent external facter facts + required: false + default: None + facter_basename: + description: + - Basename of the facter output file + required: false + default: ansible +requirements: [ puppet ] +author: Monty Taylor +''' + +EXAMPLES = ''' +# Run puppet and fail if anything goes wrong +- puppet + +# Run puppet and timeout in 5 minutes +- puppet: timeout=5m +''' + + +def _get_facter_dir(): + if os.getuid() == 0: + return '/etc/facter/facts.d' + else: + return os.path.expanduser('~/.facter/facts.d') + + +def _write_structured_data(basedir, basename, data): + if not os.path.exists(basedir): + os.makedirs(basedir) + file_path = os.path.join(basedir, "{0}.json".format(basename)) + with os.fdopen( + os.open(file_path, os.O_CREAT | os.O_WRONLY, 0o600), + 'w') as out_file: + out_file.write(json.dumps(data).encode('utf8')) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + timeout=dict(default="30m"), + puppetmaster=dict(required=False, default=None), + manifest=dict(required=False, default=None), + show_diff=dict( + default=False, aliases=['show-diff'], type='bool'), + facts=dict(default=None), + facter_basename=dict(default='ansible'), + ), + required_one_of=[ + ('puppetmaster', 'manifest'), + ], + ) + p = module.params + + global PUPPET_CMD + PUPPET_CMD = module.get_bin_path("puppet", False) + + if not PUPPET_CMD: + module.fail_json( + msg="Could not find puppet. Please ensure it is installed.") + + if p['manifest']: + if not os.path.exists(p['manifest']): + module.fail_json( + msg="Manifest file %(manifest)s not found."
else: + # failure + module.fail_json( + rc=rc, msg="%s failed with return code: %d" % (cmd, rc), + stdout=stdout, stderr=stderr) + +# import module snippets +from ansible.module_utils.basic import * + +main() From 1276420a3a39340fcd9e053a1e621cdd89f480fa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 2015 16:00:43 -0700 Subject: [PATCH 480/720] Fix documentation formatting --- monitoring/zabbix_group.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py index 8976f9f502f..4aad1218789 100644 --- a/monitoring/zabbix_group.py +++ b/monitoring/zabbix_group.py @@ -24,7 +24,7 @@ DOCUMENTATION = ''' module: zabbix_group short_description: Zabbix host groups creates/deletes description: - - Create host groups if they don't exist. + - Create host groups if they do not exist. - Delete existing host groups if they exist. version_added: "1.8" author: @@ -52,7 +52,6 @@ options: state: description: - Create or delete host group. - - Possible values are: present and absent. required: false default: "present" choices: [ "present", "absent" ] From 1605b1ec9cb7746dada8006fe317999511ac46cc Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 29 May 2015 07:00:30 -0400 Subject: [PATCH 481/720] Fix some errors pointed out by travis --- system/puppet.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/system/puppet.py b/system/puppet.py index c53c88f595d..57c76eeec9f 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -82,10 +82,10 @@ def _write_structured_data(basedir, basename, data): if not os.path.exists(basedir): os.makedirs(basedir) file_path = os.path.join(basedir, "{0}.json".format(basename)) - with os.fdopen( - os.open(file_path, os.O_CREAT | os.O_WRONLY, 0o600), - 'w') as out_file: - out_file.write(json.dumps(data).encode('utf8')) + out_file = os.fdopen( + os.open(file_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') + out_file.write(json.dumps(data).encode('utf8')) + out_file.close() def main(): @@ -116,7 +116,7 @@ def main(): if not os.path.exists(p['manifest']): module.fail_json( msg="Manifest file %(manifest)s not found." 
% dict( - manifest=p['manifest']) + manifest=p['manifest'])) # Check if puppet is disabled here if p['puppetmaster']: @@ -149,8 +149,8 @@ def main(): cmd += " --show-diff" else: cmd = ("%(base_cmd) apply --detailed-exitcodes %(manifest)s" % dict( - base_cmd=base_cmd, - manifest=pipes.quote(p['manifest'])) + base_cmd=base_cmd, + manifest=pipes.quote(p['manifest']))) rc, stdout, stderr = module.run_command(cmd) if rc == 0: From 12c945388b0ffa37aecc7b7f33fb11b41b82f309 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 29 May 2015 07:06:15 -0400 Subject: [PATCH 482/720] Add support for check mode --- system/puppet.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/system/puppet.py b/system/puppet.py index 57c76eeec9f..d6bc4348375 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -99,6 +99,7 @@ def main(): facts=dict(default=None), facter_basename=dict(default='ansible'), ), + supports_check_mode=True, required_one_of=[ ('puppetmaster', 'manifest'), ], @@ -129,7 +130,7 @@ def main(): module.fail_json( msg="Puppet agent state could not be determined.") - if module.params['facts']: + if module.params['facts'] and not module.check_mode: _write_structured_data( _get_facter_dir(), module.params['facter_basename'], @@ -139,7 +140,7 @@ def main(): timeout=pipes.quote(p['timeout']), puppet_cmd=PUPPET_CMD) if p['puppetmaster']: - cmd = ("%(base_cmd) agent --onetime" + cmd = ("%(base_cmd)s agent --onetime" " --server %(puppetmaster)s" " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay" " --detailed-exitcodes --verbose") % dict( @@ -147,10 +148,13 @@ def main(): puppetmaster=pipes.quote(p['puppetmaster'])) if p['show_diff']: cmd += " --show-diff" + if module.check_mode: + cmd += " --noop" else: - cmd = ("%(base_cmd) apply --detailed-exitcodes %(manifest)s" % dict( - base_cmd=base_cmd, - manifest=pipes.quote(p['manifest']))) + cmd = "%s apply --detailed-exitcodes " % base_cmd + if module.check_mode: + cmd += "--noop " + cmd += pipes.quote(p['manifest']) rc, stdout, stderr = module.run_command(cmd) if rc == 0: From 8b6001c3da53553688f218e6b11c84c0c705c2a2 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 29 May 2015 08:09:31 -0400 Subject: [PATCH 483/720] Fix octal values for python 2.4 --- system/puppet.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/system/puppet.py b/system/puppet.py index d6bc4348375..46a5ea58d4f 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -18,6 +18,7 @@ import json import os import pipes +import stat DOCUMENTATION = ''' --- @@ -82,8 +83,13 @@ def _write_structured_data(basedir, basename, data): if not os.path.exists(basedir): os.makedirs(basedir) file_path = os.path.join(basedir, "{0}.json".format(basename)) + # This is more complex than you might normally expect because we want to + # open the file with only u+rw set. 
Also, we use the stat constants + # because ansible still supports python 2.4 and the octal syntax changed out_file = os.fdopen( - os.open(file_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') + os.open( + file_path, os.O_CREAT | os.O_WRONLY, + stat.S_IRUSR | stat.S_IWUSR), 'w') out_file.write(json.dumps(data).encode('utf8')) out_file.close() From 4939df305b9f49e7135657b23950213b036c12a2 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 29 May 2015 10:07:00 +0200 Subject: [PATCH 484/720] cloudstack: improve required params --- cloud/cloudstack/cs_account.py | 3 +++ cloud/cloudstack/cs_affinitygroup.py | 3 +++ cloud/cloudstack/cs_firewall.py | 7 +++++++ cloud/cloudstack/cs_instance.py | 3 +++ cloud/cloudstack/cs_instancegroup.py | 3 +++ cloud/cloudstack/cs_iso.py | 3 +++ cloud/cloudstack/cs_portforward.py | 3 +++ cloud/cloudstack/cs_securitygroup.py | 3 +++ cloud/cloudstack/cs_securitygroup_rule.py | 4 ++++ cloud/cloudstack/cs_sshkeypair.py | 3 +++ cloud/cloudstack/cs_vmsnapshot.py | 4 ++++ 11 files changed, 39 insertions(+) diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index 399dfa090cc..a8510bbc5b3 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -369,6 +369,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 2a8de46fe41..9ff3b123a0c 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -223,6 +223,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index c9e42be4a4f..ef78b6a242d 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -422,6 +422,13 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_one_of = ( + ['ip_address', 'network'], + ), + required_together = ( + ['icmp_type', 'icmp_code'], + ['api_key', 'api_secret', 'api_url'], + ), mutually_exclusive = ( ['icmp_type', 'start_port'], ['icmp_type', 'end_port'], diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 1f5cc6ca393..c2c219febac 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -788,6 +788,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index d62004cc94f..9041e351539 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -200,6 +200,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 749acdf594a..4a97fc3d027 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -333,6 +333,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) 
diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 123da67e2bc..47af7848ee1 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -407,6 +407,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 73a54fef795..9ef81095322 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -167,6 +167,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index ef48b3896ce..a467d3f5c38 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -402,6 +402,10 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['icmp_type', 'icmp_code'], + ['api_key', 'api_secret', 'api_url'], + ), mutually_exclusive = ( ['icmp_type', 'start_port'], ['icmp_type', 'end_port'], diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 0d2e2c822f1..e7ee88e3bea 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -219,6 +219,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index b71901a317f..cadf229af55 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -292,6 +292,10 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['icmp_type', 'icmp_code'], + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) From 4da1a5de9e72af210563fe8c8fffe352f22f4be8 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 00:24:34 +0200 Subject: [PATCH 485/720] cs_instance: improve hypervisor argument and return --- cloud/cloudstack/cs_instance.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index c2c219febac..734ffb62d46 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -326,6 +326,11 @@ tags: returned: success type: dict sample: '[ { "key": "foo", "value": "bar" } ]' +hypervisor: + description: Hypervisor related to this instance. 
+ returned: success + type: string + sample: KVM ''' import base64 @@ -712,6 +717,8 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): self.result['account'] = instance['account'] if 'project' in instance: self.result['project'] = instance['project'] + if 'hypervisor' in instance: + self.result['hypervisor'] = instance['hypervisor'] if 'publicip' in instance: self.result['public_ip'] = instance['public_ip'] if 'passwordenabled' in instance: @@ -771,7 +778,7 @@ def main(): disk_offering = dict(default=None), disk_size = dict(type='int', default=None), keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None), - hypervisor = dict(default=None), + hypervisor = dict(choices=['KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM'], default=None), security_groups = dict(type='list', aliases=[ 'security_group' ], default=[]), affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]), domain = dict(default=None), From 17504f0a268094e3b2ee4832435ebe80e34e167c Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 00:26:00 +0200 Subject: [PATCH 486/720] cloudstack: add instance_name alias internal name to returns in cs_instance --- cloud/cloudstack/cs_instance.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 734ffb62d46..13fc57991d3 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -331,6 +331,11 @@ hypervisor: returned: success type: string sample: KVM +instance_name: + description: Internal name of the instance (ROOT admin only). + returned: success + type: string + sample: i-44-3992-VM ''' import base64 @@ -719,6 +724,8 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): self.result['project'] = instance['project'] if 'hypervisor' in instance: self.result['hypervisor'] = instance['hypervisor'] + if 'instancename' in instance: + self.result['instance_name'] = instance['instancename'] if 'publicip' in instance: self.result['public_ip'] = instance['public_ip'] if 'passwordenabled' in instance: From a425c413be6671921a04806624674b9daea2b0c2 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 00:28:06 +0200 Subject: [PATCH 487/720] cloudstack: update doc in cs_instance --- cloud/cloudstack/cs_instance.py | 36 ++++++++++++++++----------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 13fc57991d3..c2dd45fe2b5 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -23,7 +23,7 @@ DOCUMENTATION = ''' module: cs_instance short_description: Manages instances and virtual machines on Apache CloudStack based clouds. description: - - Deploy, start, restart, stop and destroy instances on Apache CloudStack, Citrix CloudPlatform and Exoscale. + - Deploy, start, restart, stop and destroy instances. version_added: '2.0' author: '"René Moser (@resmo)" ' options: @@ -49,22 +49,29 @@ options: choices: [ 'deployed', 'started', 'stopped', 'restarted', 'destroyed', 'expunged', 'present', 'absent' ] service_offering: description: - - Name or id of the service offering of the new instance. If not set, first found service offering is used. + - Name or id of the service offering of the new instance. + - If not set, first found service offering is used. 
required: false default: null template: description: - - Name or id of the template to be used for creating the new instance. Required when using C(state=present). Mutually exclusive with C(ISO) option. + - Name or id of the template to be used for creating the new instance. + - Required when using C(state=present). + - Mutually exclusive with C(ISO) option. required: false default: null iso: description: - - Name or id of the ISO to be used for creating the new instance. Required when using C(state=present). Mutually exclusive with C(template) option. + - Name or id of the ISO to be used for creating the new instance. + - Required when using C(state=present). + - Mutually exclusive with C(template) option. required: false default: null hypervisor: description: - - Name the hypervisor to be used for creating the new instance. Relevant when using C(state=present) and option C(ISO) is used. If not set, first found hypervisor will be used. + - Name the hypervisor to be used for creating the new instance. + - Relevant when using C(state=present) and option C(ISO) is used. + - If not set, first found hypervisor will be used. required: false default: null choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] @@ -82,7 +89,7 @@ options: aliases: [ 'network' ] ip_address: description: - - IPv4 address for default instance's network during creation + - IPv4 address for default instance's network during creation. required: false default: null ip6_address: @@ -123,7 +130,8 @@ options: default: null zone: description: - - Name of the zone in which the instance shoud be deployed. If not set, default zone is used. + - Name of the zone in which the instance shoud be deployed. + - If not set, default zone is used. required: false default: null ssh_key: @@ -164,7 +172,7 @@ extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' -# Create a instance on CloudStack from an ISO +# Create a instance from an ISO # NOTE: Names of offerings and ISOs depending on the CloudStack configuration. - local_action: module: cs_instance @@ -181,7 +189,6 @@ EXAMPLES = ''' - Sync Integration - Storage Integration - # For changing a running instance, use the 'force' parameter - local_action: module: cs_instance @@ -191,7 +198,6 @@ EXAMPLES = ''' service_offering: 2cpu_2gb force: yes - # Create or update a instance on Exoscale's public cloud - local_action: module: cs_instance @@ -202,19 +208,13 @@ EXAMPLES = ''' tags: - { key: admin, value: john } - { key: foo, value: bar } - register: vm - -- debug: msg='default ip {{ vm.default_ip }} and is in state {{ vm.state }}' - # Ensure a instance has stopped - local_action: cs_instance name=web-vm-1 state=stopped - # Ensure a instance is running - local_action: cs_instance name=web-vm-1 state=started - # Remove a instance - local_action: cs_instance name=web-vm-1 state=absent ''' @@ -257,7 +257,7 @@ password: type: string sample: Ge2oe7Do ssh_key: - description: Name of ssh key deployed to instance. + description: Name of SSH key deployed to instance. returned: success type: string sample: key@work @@ -282,7 +282,7 @@ default_ip: type: string sample: 10.23.37.42 public_ip: - description: Public IP address with instance via static nat rule. + description: Public IP address with instance via static NAT rule. 
returned: success type: string sample: 1.2.3.4 From 506b4c46724fefdae42c44ffadba2118767e6069 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 00:46:20 +0200 Subject: [PATCH 488/720] cloudstack: update doc of cs_portforward, fixes typos. --- cloud/cloudstack/cs_portforward.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 47af7848ee1..cbd363f69e6 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -92,12 +92,13 @@ options: default: null project: description: - - Name of the project the c(vm) is located in. + - Name of the project the C(vm) is located in. required: false default: null zone: description: - - Name of the zone in which the virtual machine is in. If not set, default zone is used. + - Name of the zone in which the virtual machine is in. + - If not set, default zone is used. required: false default: null poll_async: @@ -117,7 +118,6 @@ EXAMPLES = ''' public_port: 80 private_port: 8080 - # forward SSH and open firewall - local_action: module: cs_portforward @@ -127,7 +127,6 @@ EXAMPLES = ''' private_port: 22 open_firewall: true - # forward DNS traffic, but do not open firewall - local_action: module: cs_portforward @@ -138,7 +137,6 @@ EXAMPLES = ''' protocol: udp open_firewall: true - # remove ssh port forwarding - local_action: module: cs_portforward @@ -161,26 +159,26 @@ protocol: type: string sample: tcp private_port: - description: Private start port. + description: Start port on the virtual machine's IP address. returned: success type: int sample: 80 private_end_port: - description: Private end port. + description: End port on the virtual machine's IP address. returned: success type: int public_port: - description: Public start port. + description: Start port on the public IP address. returned: success type: int sample: 80 public_end_port: - description: Public end port. + description: End port on the public IP address. returned: success type: int sample: 80 tags: - description: Tag srelated to the port forwarding. + description: Tags related to the port forwarding. returned: success type: list sample: [] @@ -201,7 +199,6 @@ vm_guest_ip: sample: 10.101.65.152 ''' - try: from cs import CloudStack, CloudStackException, read_config has_lib_cs = True From f31c7d9b055a56a81c126ce80506bd2634d1e0ba Mon Sep 17 00:00:00 2001 From: mlamatr Date: Fri, 29 May 2015 23:18:44 -0400 Subject: [PATCH 489/720] corrected typo in URL for consul.io --- clustering/consul.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clustering/consul.py b/clustering/consul.py index 0baaae83b84..8423ffe418f 100644 --- a/clustering/consul.py +++ b/clustering/consul.py @@ -20,7 +20,7 @@ DOCUMENTATION = """ module: consul short_description: "Add, modify & delete services within a consul cluster. - See http://conul.io for more details." + See http://consul.io for more details." description: - registers services and checks for an agent with a consul cluster. 
A service is some process running on the agent node that should be advertised by From 6643ea5825457faabebe134757cc3cd59653b1ba Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 11:03:32 +0200 Subject: [PATCH 490/720] cloudstack: add new param api_timeout --- cloud/cloudstack/cs_account.py | 1 + cloud/cloudstack/cs_affinitygroup.py | 1 + cloud/cloudstack/cs_firewall.py | 1 + cloud/cloudstack/cs_instance.py | 1 + cloud/cloudstack/cs_instancegroup.py | 1 + cloud/cloudstack/cs_iso.py | 1 + cloud/cloudstack/cs_portforward.py | 1 + cloud/cloudstack/cs_securitygroup.py | 1 + cloud/cloudstack/cs_securitygroup_rule.py | 1 + cloud/cloudstack/cs_sshkeypair.py | 1 + cloud/cloudstack/cs_vmsnapshot.py | 1 + 11 files changed, 11 insertions(+) diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index a8510bbc5b3..dc845acbae2 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -368,6 +368,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 9ff3b123a0c..afb60a83baa 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -222,6 +222,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index ef78b6a242d..fca8e88a509 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -421,6 +421,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_one_of = ( ['ip_address', 'network'], diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index c2dd45fe2b5..b6f2d098346 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -801,6 +801,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index 9041e351539..01630bc225f 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -199,6 +199,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 4a97fc3d027..f38faeceeb4 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -332,6 +332,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 
cbd363f69e6..e3a456e424b 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -403,6 +403,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 9ef81095322..8f1592ca43a 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -166,6 +166,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index a467d3f5c38..7afb1463503 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -401,6 +401,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['icmp_type', 'icmp_code'], diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index e7ee88e3bea..b4b764dbe33 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -218,6 +218,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index cadf229af55..218a947ac5a 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -291,6 +291,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['icmp_type', 'icmp_code'], From 16c70f96943e43b1af37f40317b7809d2cfc12f6 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 11:05:03 +0200 Subject: [PATCH 491/720] cloudstack: add choices for api_http_method --- cloud/cloudstack/cs_account.py | 6 +----- cloud/cloudstack/cs_affinitygroup.py | 3 +-- cloud/cloudstack/cs_firewall.py | 6 +----- cloud/cloudstack/cs_instance.py | 2 +- cloud/cloudstack/cs_instancegroup.py | 3 +-- cloud/cloudstack/cs_iso.py | 5 +---- cloud/cloudstack/cs_portforward.py | 2 +- cloud/cloudstack/cs_securitygroup.py | 3 +-- cloud/cloudstack/cs_securitygroup_rule.py | 6 +----- cloud/cloudstack/cs_sshkeypair.py | 2 +- cloud/cloudstack/cs_vmsnapshot.py | 4 +--- 11 files changed, 11 insertions(+), 31 deletions(-) diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index dc845acbae2..597e4c7394e 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -108,7 +108,6 @@ local_action: email: john.doe@example.com domain: CUSTOMERS - # Lock an existing account in domain 'CUSTOMERS' local_action: module: cs_account @@ -116,7 +115,6 @@ local_action: domain: CUSTOMERS state: locked - # Disable an existing account in domain 'CUSTOMERS' local_action: module: cs_account @@ -124,7 +122,6 @@ local_action: domain: CUSTOMERS state: disabled - # Enable an existing account 
in domain 'CUSTOMERS' local_action: module: cs_account @@ -132,7 +129,6 @@ local_action: domain: CUSTOMERS state: enabled - # Remove an account in domain 'CUSTOMERS' local_action: module: cs_account @@ -367,7 +363,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index afb60a83baa..40896942cb1 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -72,7 +72,6 @@ EXAMPLES = ''' name: haproxy affinty_type: host anti-affinity - # Remove a affinity group - local_action: module: cs_affinitygroup @@ -221,7 +220,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index fca8e88a509..828aa1faf98 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -115,7 +115,6 @@ EXAMPLES = ''' port: 80 cidr: 1.2.3.4/32 - # Allow inbound tcp/udp port 53 to 4.3.2.1 - local_action: module: cs_firewall @@ -126,7 +125,6 @@ EXAMPLES = ''' - tcp - udp - # Ensure firewall rule is removed - local_action: module: cs_firewall @@ -136,7 +134,6 @@ EXAMPLES = ''' cidr: 17.0.0.0/8 state: absent - # Allow all outbound traffic - local_action: module: cs_firewall @@ -144,7 +141,6 @@ EXAMPLES = ''' type: egress protocol: all - # Allow only HTTP outbound traffic for an IP - local_action: module: cs_firewall @@ -420,7 +416,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_one_of = ( diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index b6f2d098346..05cdc960e95 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -800,7 +800,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index 01630bc225f..396cafa388d 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -61,7 +61,6 @@ EXAMPLES = ''' module: cs_instancegroup name: loadbalancers - # Remove an instance group - local_action: module: cs_instancegroup @@ -198,7 +197,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index f38faeceeb4..77ce85b505e 100644 --- a/cloud/cloudstack/cs_iso.py 
+++ b/cloud/cloudstack/cs_iso.py @@ -116,7 +116,6 @@ EXAMPLES = ''' url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso os_type: Debian GNU/Linux 7(64-bit) - # Register an ISO with given name if ISO md5 checksum does not already exist. - local_action: module: cs_iso @@ -125,14 +124,12 @@ EXAMPLES = ''' os_type: checksum: 0b31bccccb048d20b551f70830bb7ad0 - # Remove an ISO by name - local_action: module: cs_iso name: Debian 7 64-bit state: absent - # Remove an ISO by checksum - local_action: module: cs_iso @@ -331,7 +328,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index e3a456e424b..00b084d9195 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -402,7 +402,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 8f1592ca43a..08fb72c821d 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -57,7 +57,6 @@ EXAMPLES = ''' name: default description: default security group - # Remove a security group - local_action: module: cs_securitygroup @@ -165,7 +164,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 7afb1463503..9252e06ce62 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -102,7 +102,6 @@ EXAMPLES = ''' port: 80 cidr: 1.2.3.4/32 - # Allow tcp/udp outbound added to security group 'default' - local_action: module: cs_securitygroup_rule @@ -115,7 +114,6 @@ EXAMPLES = ''' - tcp - udp - # Allow inbound icmp from 0.0.0.0/0 added to security group 'default' - local_action: module: cs_securitygroup_rule @@ -124,7 +122,6 @@ EXAMPLES = ''' icmp_code: -1 icmp_type: -1 - # Remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default' - local_action: module: cs_securitygroup_rule @@ -132,7 +129,6 @@ EXAMPLES = ''' port: 80 state: absent - # Allow inbound port 80/tcp from security group web added to security group 'default' - local_action: module: cs_securitygroup_rule @@ -400,7 +396,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index b4b764dbe33..0a54a1971bc 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -217,7 +217,7 @@ 
def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index 218a947ac5a..fb7668640dc 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -88,7 +88,6 @@ EXAMPLES = ''' vm: web-01 snapshot_memory: yes - # Revert a VM to a snapshot after a failed upgrade - local_action: module: cs_vmsnapshot @@ -96,7 +95,6 @@ EXAMPLES = ''' vm: web-01 state: revert - # Remove a VM snapshot after successful upgrade - local_action: module: cs_vmsnapshot @@ -290,7 +288,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( From 35dd0025aac52b6896cfad467c5b1e03593464c6 Mon Sep 17 00:00:00 2001 From: Q Date: Sat, 30 May 2015 23:01:52 +1000 Subject: [PATCH 492/720] Update patch.py --- files/patch.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/files/patch.py b/files/patch.py index c2982e2380e..0932ed3556a 100644 --- a/files/patch.py +++ b/files/patch.py @@ -65,6 +65,13 @@ options: required: false type: "int" default: "0" + backup_copy: + description: + - passes --backup --version-control=numbered to patch, + producing numbered backup copies + required: false + type: "bool" + default: "False" note: - This module requires GNU I(patch) utility to be installed on the remote host. 
''' @@ -101,7 +108,7 @@ def is_already_applied(patch_func, patch_file, basedir, dest_file=None, strip=0) return rc == 0 -def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False): +def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False, backup=False): opts = ['--quiet', '--forward', '--batch', '--reject-file=-', "--strip=%s" % strip, "--directory='%s'" % basedir, "--input='%s'" % patch_file] @@ -109,6 +116,8 @@ def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_ru opts.append('--dry-run') if dest_file: opts.append("'%s'" % dest_file) + if backup: + opts.append('--backup --version-control=numbered') (rc, out, err) = patch_func(opts) if rc != 0: @@ -124,6 +133,8 @@ def main(): 'basedir': {}, 'strip': {'default': 0, 'type': 'int'}, 'remote_src': {'default': False, 'type': 'bool'}, + # don't call it "backup" since the semantics differs from the default one + 'backup_copy': { 'default': False, 'type': 'bool' } }, required_one_of=[['dest', 'basedir']], supports_check_mode=True @@ -156,8 +167,8 @@ def main(): changed = False if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip): try: - apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip, - dry_run=module.check_mode) + apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip, + dry_run=module.check_mode, backup=p.backup_copy ) changed = True except PatchError, e: module.fail_json(msg=str(e)) From 2189af8c9572fbae280c7b9dfa9878894d08314b Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 11:05:36 +0200 Subject: [PATCH 493/720] cloudstack: fix examples in cs_iso --- cloud/cloudstack/cs_iso.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 77ce85b505e..d9ec6880627 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -121,7 +121,7 @@ EXAMPLES = ''' module: cs_iso name: Debian 7 64-bit url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso - os_type: + os_type: Debian GNU/Linux 7(64-bit) checksum: 0b31bccccb048d20b551f70830bb7ad0 # Remove an ISO by name From 6c29a181c8a0d4bb95614f64ef908c319547c395 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 18:28:41 +0200 Subject: [PATCH 494/720] cloudstack: fix doc for cs_instance, force is defaulted to false --- cloud/cloudstack/cs_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 05cdc960e95..46fd66f510d 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -156,7 +156,7 @@ options: description: - Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed. required: false - default: true + default: false tags: description: - List of tags. Tags are a list of dictionaries having keys C(key) and C(value). 
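
For orientation, the backup_copy flag added to the patch module above (and renamed to plain backup by a later patch in this series) would be driven from a playbook roughly as sketched below. The patch file name and target directory are invented for illustration; only the parameter names come from the module's argument spec shown in the diff.

- name: apply a patch and keep numbered backup copies of the files it touches
  patch:
    src: fix-logging.patch       # hypothetical patch file shipped alongside the playbook
    basedir: /opt/myapp          # hypothetical source tree on the managed host
    strip: 1
    backup_copy: yes             # translates to --backup --version-control=numbered
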
From d20fa0477ebab3ecf06f37771831e500b20ab8ad Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 22:54:56 +0200 Subject: [PATCH 495/720] cloudstack: add new module cs_project --- cloud/cloudstack/cs_project.py | 342 +++++++++++++++++++++++++++++++++ 1 file changed, 342 insertions(+) create mode 100644 cloud/cloudstack/cs_project.py diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py new file mode 100644 index 00000000000..b604a1b6f32 --- /dev/null +++ b/cloud/cloudstack/cs_project.py @@ -0,0 +1,342 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_project +short_description: Manages projects on Apache CloudStack based clouds. +description: + - Create, update, suspend, activate and remove projects. +version_added: '2.0' +author: '"René Moser (@resmo)" ' + name: + description: + - Name of the project. + required: true + displaytext: + description: + - Displaytext of the project. + - If not specified, C(name) will be used as displaytext. + required: false + default: null + state: + description: + - State of the project. + required: false + default: 'present' + choices: [ 'present', 'absent', 'active', 'suspended' ] + domain: + description: + - Domain the project is related to. + required: false + default: null + account: + description: + - Account the project is related to. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Create a project +- local_action: + module: cs_project + name: web + +# Rename a project +- local_action: + module: cs_project + name: web + displaytext: my web project + +# Suspend an existing project +- local_action: + module: cs_project + name: web + state: suspended + +# Activate an existing project +- local_action: + module: cs_project + name: web + state: active + +# Remove a project +- local_action: + module: cs_project + name: web + state: absent +''' + +RETURN = ''' +--- +id: + description: ID of the project. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the project. + returned: success + type: string + sample: web project +displaytext: + description: Display text of the project. + returned: success + type: string + sample: web project +state: + description: State of the project. + returned: success + type: string + sample: Active +domain: + description: Domain the project is related to. + returned: success + type: string + sample: example domain +account: + description: Account the project is related to. + returned: success + type: string + sample: example account +tags: + description: List of resource tags associated with the project. 
+ returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackProject(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.project = None + + + def get_displaytext(self): + displaytext = self.module.params.get('displaytext') + if not displaytext: + displaytext = self.module.params.get('name') + return displaytext + + + def get_project(self): + if not self.project: + project = self.module.params.get('name') + + args = {} + args['listall'] = True + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + + projects = self.cs.listProjects(**args) + if projects: + for p in projects['project']: + if project in [ p['name'], p['id']]: + self.project = p + break + return self.project + + + def present_project(self): + project = self.get_project() + if not project: + project = self.create_project(project) + else: + project = self.update_project(project) + return project + + + def update_project(self, project): + args = {} + args['id'] = project['id'] + args['displaytext'] = self.get_displaytext() + + if self._has_changed(args, project): + self.result['changed'] = True + if not self.module.check_mode: + project = self.cs.updateProject(**args) + + if 'errortext' in project: + self.module.fail_json(msg="Failed: '%s'" % project['errortext']) + + poll_async = self.module.params.get('poll_async') + if project and poll_async: + project = self._poll_job(project, 'project') + return project + + + def create_project(self, project): + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['displaytext'] = self.get_displaytext() + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + + if not self.module.check_mode: + project = self.cs.createProject(**args) + + if 'errortext' in project: + self.module.fail_json(msg="Failed: '%s'" % project['errortext']) + + poll_async = self.module.params.get('poll_async') + if project and poll_async: + project = self._poll_job(project, 'project') + return project + + + def state_project(self, state=None): + project = self.get_project() + + if not project: + self.module.fail_json(msg="No project named '%s' found." 
% self.module.params('name')) + + if project['state'].lower() != state: + self.result['changed'] = True + + args = {} + args['id'] = project['id'] + + if not self.module.check_mode: + if state == 'suspended': + project = self.cs.suspendProject(**args) + else: + project = self.cs.activateProject(**args) + + if 'errortext' in project: + self.module.fail_json(msg="Failed: '%s'" % project['errortext']) + + poll_async = self.module.params.get('poll_async') + if project and poll_async: + project = self._poll_job(project, 'project') + return project + + + def absent_project(self): + project = self.get_project() + if project: + self.result['changed'] = True + + args = {} + args['id'] = project['id'] + + if not self.module.check_mode: + res = self.cs.deleteProject(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'project') + return project + + + def get_result(self, project): + if project: + if 'name' in project: + self.result['name'] = project['name'] + if 'displaytext' in project: + self.result['displaytext'] = project['displaytext'] + if 'account' in project: + self.result['account'] = project['account'] + if 'domain' in project: + self.result['domain'] = project['domain'] + if 'state' in project: + self.result['state'] = project['state'] + if 'tags' in project: + self.result['tags'] = [] + for tag in project['tags']: + result_tag = {} + result_tag['key'] = tag['key'] + result_tag['value'] = tag['value'] + self.result['tags'].append(result_tag) + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + displaytext = dict(default=None), + state = dict(choices=['present', 'absent', 'active', 'suspended' ], default='present'), + domain = dict(default=None), + account = dict(default=None), + poll_async = dict(type='bool', choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_project = AnsibleCloudStackProject(module) + + state = module.params.get('state') + if state in ['absent']: + project = acs_project.absent_project() + + elif state in ['active', 'suspended']: + project = acs_project.state_project(state=state) + + else: + project = acs_project.present_project() + + result = acs_project.get_result(project) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From f731bcc2d58c5f8cdf244e15b19c1e93f62fb2b9 Mon Sep 17 00:00:00 2001 From: fdupoux Date: Sun, 31 May 2015 12:38:45 +0100 Subject: [PATCH 496/720] Devices in the current_devs list must also be converted to absolute device paths so comparison with dev_list works --- system/lvg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/lvg.py b/system/lvg.py index 955b94668dc..3c6c5ef2930 100644 --- a/system/lvg.py +++ b/system/lvg.py @@ -211,7 +211,7 @@ def main(): 
module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes"%(vg)) ### resize VG - current_devs = [ pv['name'] for pv in pvs if pv['vg_name'] == vg ] + current_devs = [ os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg ] devs_to_remove = list(set(current_devs) - set(dev_list)) devs_to_add = list(set(dev_list) - set(current_devs)) From 4690237b7b2989c23c52ce551859f3442c9b5ac3 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Mon, 1 Jun 2015 08:59:50 -0400 Subject: [PATCH 497/720] Add new policy guidelines for Extras More to do here, but this is a start. --- CONTRIBUTING.md | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e441a4e3527..38b95840a77 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,28 +1,37 @@ -Welcome To Ansible GitHub -========================= +Contributing to ansible-modules-extras +====================================== -Hi! Nice to see you here! +The Ansible Extras Modules are written and maintained by the Ansible community, according to the following contribution guidelines. + +If you'd like to contribute code +================================ + +Please see [this web page](http://docs.ansible.com/community.html) for information about the contribution process. Important license agreement information is also included on that page. + +If you'd like to contribute code to an existing module +====================================================== +Each module in Extras is maintained by the owner of that module; each module's owner is indicated in the documentation section of the module itself. Any pull request for a module that is given a +1 by the owner in the comments will be merged by the Ansible team. + +If you'd like to contribute a new module +======================================== +Ansible welcomes new modules. Please be certain that you've read the [module development guide and standards](http://docs.ansible.com/developing_modules.html) thoroughly before submitting your module. + +Each new module requires two current module owners to approve a new module for inclusion. The Ansible community reviews new modules as often as possible, but please be patient; there are a lot of new module submissions in the pipeline, and it takes time to evaluate a new module for its adherence to module standards. + +Once your module is accepted, you become responsible for maintenance of that module, which means responding to pull requests and issues in a reasonably timely manner. If you'd like to ask a question =============================== Please see [this web page ](http://docs.ansible.com/community.html) for community information, which includes pointers on how to ask questions on the [mailing lists](http://docs.ansible.com/community.html#mailing-list-information) and IRC. -The github issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, and that page has the pointers to those. - -If you'd like to contribute code -================================ - -Please see [this web page](http://docs.ansible.com/community.html) for information about the contribution process. Important license agreement information is also included on that page. +The Github issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, and that page has the pointers to those. 
If you'd like to file a bug =========================== -I'd also read the community page above, but in particular, make sure you copy [this issue template](https://github.com/ansible/ansible/blob/devel/ISSUE_TEMPLATE.md) into your ticket description. We have a friendly neighborhood bot that will remind you if you forget :) This template helps us organize tickets faster and prevents asking some repeated questions, so it's very helpful to us and we appreciate your help with it. +Read the community page above, but in particular, make sure you copy [this issue template](https://github.com/ansible/ansible/blob/devel/ISSUE_TEMPLATE.md) into your ticket description. We have a friendly neighborhood bot that will remind you if you forget :) This template helps us organize tickets faster and prevents asking some repeated questions, so it's very helpful to us and we appreciate your help with it. Also please make sure you are testing on the latest released version of Ansible or the development branch. Thanks! - - - From 223694ccf270da8b1b5a5d61bc05771c111dda1c Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Mon, 1 Jun 2015 12:07:23 -0400 Subject: [PATCH 498/720] Revert "Added eval for pasting tag lists" --- monitoring/datadog_event.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index a3ac92a03bb..1d6a98dc9c3 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -116,10 +116,7 @@ def post_event(module): if module.params['date_happened'] != None: body['date_happened'] = module.params['date_happened'] if module.params['tags'] != None: - if module.params['tags'].startswith("[") and module.params['tags'].endswith("]"): - body['tags'] = eval(module.params['tags']) - else: - body['tags'] = module.params['tags'].split(",") + body['tags'] = module.params['tags'].split(",") if module.params['aggregation_key'] != None: body['aggregation_key'] = module.params['aggregation_key'] if module.params['source_type_name'] != None: From 4b35db4932daa1d583ffbdc5b67be00c2cbe7a2f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 1 Jun 2015 12:31:20 -0400 Subject: [PATCH 499/720] added version added --- monitoring/nagios.py | 1 + 1 file changed, 1 insertion(+) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index bfa498496e6..a1ba1be3f54 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -52,6 +52,7 @@ options: required: false default: Ansible comment: + version_added: "2.0" description: - Comment for C(downtime) action. required: false From 3861904b02b72b3a3f1fe044603a483855953d6a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 1 Jun 2015 12:36:49 -0400 Subject: [PATCH 500/720] updated docs for 2.0 --- monitoring/nagios.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 38a1f8c161a..543f094b70e 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -30,6 +30,7 @@ options: action: description: - Action to take. + - servicegroup options were added in 2.0. required: true default: null choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", @@ -73,6 +74,7 @@ options: required: true default: null servicegroup: + version_added: "2.0" description: - the Servicegroup we want to set downtimes/alerts for. B(Required) option when using the C(servicegroup_service_downtime) amd C(servicegroup_host_downtime). 
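
As a usage sketch for the servicegroup actions documented above: the action and servicegroup parameters come from the documentation in the patch, minutes is the module's standard downtime-length option, while the servicegroup name and the delegation target are assumptions for illustration only.

- name: schedule one hour of downtime for every host in a Nagios servicegroup
  nagios:
    action: servicegroup_host_downtime
    minutes: 60
    servicegroup: webservers           # hypothetical servicegroup defined in Nagios
  delegate_to: "{{ nagios_server }}"   # assumes the task runs on the host with the Nagios command file
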
From 1cc0b4c9e648db83bee4dea19327e7713888f8bc Mon Sep 17 00:00:00 2001 From: David Wittman Date: Tue, 21 Oct 2014 16:56:13 -0500 Subject: [PATCH 501/720] [lvol] Add opts parameter Adds the ability to set options to be passed to the lvcreate command using the `opts` parameter. --- system/lvol.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index c49cb369440..d807f9e8336 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -57,6 +57,10 @@ options: - Shrink or remove operations of volumes requires this switch. Ensures that that filesystems get never corrupted/destroyed by mistake. required: false + opts: + version_added: "1.9" + description: + - Free-form options to be passed to the lvcreate command notes: - Filesystems on top of the volume are not resized. ''' @@ -71,6 +75,9 @@ EXAMPLES = ''' # Create a logical volume the size of all remaining space in the volume group - lvol: vg=firefly lv=test size=100%FREE +# Create a logical volume with special options +- lvol: vg=firefly lv=test size=512g opts="-r 16" + # Extend the logical volume to 1024m. - lvol: vg=firefly lv=test size=1024 @@ -116,6 +123,7 @@ def main(): vg=dict(required=True), lv=dict(required=True), size=dict(), + opts=dict(type='str'), state=dict(choices=["absent", "present"], default='present'), force=dict(type='bool', default='no'), ), @@ -135,11 +143,15 @@ def main(): vg = module.params['vg'] lv = module.params['lv'] size = module.params['size'] + opts = module.params['opts'] state = module.params['state'] force = module.boolean(module.params['force']) size_opt = 'L' size_unit = 'm' + if opts is None: + opts = "" + if size: # LVCREATE(8) -l --extents option with percentage if '%' in size: @@ -212,7 +224,8 @@ def main(): changed = True else: lvcreate_cmd = module.get_bin_path("lvcreate", required=True) - rc, _, err = module.run_command("%s %s -n %s -%s %s%s %s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, vg)) + cmd = "%s %s -n %s -%s %s%s %s %s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg) + rc, _, err = module.run_command(cmd) if rc == 0: changed = True else: From 307424c69419a57d7be50b2a85fb109293f5d91f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 1 Jun 2015 15:27:55 -0400 Subject: [PATCH 502/720] added copyright/license info to modules I had missed --- notification/jabber.py | 18 ++++++++++++++++++ system/svc.py | 17 +++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/notification/jabber.py b/notification/jabber.py index 466c72d1570..1a19140a83d 100644 --- a/notification/jabber.py +++ b/notification/jabber.py @@ -1,5 +1,23 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# +# (c) 2015, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. 
If not, see + DOCUMENTATION = ''' --- diff --git a/system/svc.py b/system/svc.py index 0227a69ecd8..9831ce42ea7 100644 --- a/system/svc.py +++ b/system/svc.py @@ -1,5 +1,22 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# +# (c) 2015, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see DOCUMENTATION = ''' --- From 61aab829ed4801c3c86ae10a5142400fd2e67d0f Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Mon, 1 Jun 2015 15:15:37 -0500 Subject: [PATCH 503/720] lxc_container: remove BabyJSON Removed the usage of baby json. This is in response to the fact that the baby json functionality was removed in Ansible 1.8 Ref: https://github.com/ansible/ansible-modules-extras/issues/430 --- cloud/lxc/lxc_container.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 119d45069c3..b2dba2111e4 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -383,9 +383,7 @@ EXAMPLES = """ try: import lxc except ImportError: - msg = 'The lxc module is not importable. Check the requirements.' - print("failed=True msg='%s'" % msg) - raise SystemExit(msg) + HAS_LXC = False # LXC_COMPRESSION_MAP is a map of available compression types when creating @@ -1706,6 +1704,11 @@ def main(): supports_check_mode=False, ) + if not HAS_LXC: + module.fail_json( + msg='The `lxc` module is not importable. Check the requirements.' + ) + lv_name = module.params.get('lv_name') if not lv_name: module.params['lv_name'] = module.params.get('name') From 858f9e3601f58dc16ede3056dc4aeff4f8da7cb0 Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Mon, 1 Jun 2015 15:31:56 -0500 Subject: [PATCH 504/720] Updates the doc information for the python2-lxc dep The python2-lxc library has been uploaded to pypi as such this commit updates the requirements and doc information for the module such that it instructs the user to install the pip package "lxc-python2" while also noting that the package could be gotten from source as well. In the update comments have been added to the requirements list which notes where the package should come from, Closes-Bug: https://github.com/ansible/ansible-modules-extras/issues/550 --- cloud/lxc/lxc_container.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 119d45069c3..15d76df79a0 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -173,9 +173,9 @@ options: - list of 'key=value' options to use when configuring a container. required: false requirements: - - 'lxc >= 1.0' - - 'python >= 2.6' - - 'python2-lxc >= 0.1' + - 'lxc >= 1.0 # OS package' + - 'python >= 2.6 # OS Package' + - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc' notes: - Containers must have a unique name. 
If you attempt to create a container with a name that already exists in the users namespace the module will @@ -195,7 +195,8 @@ notes: creating the archive. - If your distro does not have a package for "python2-lxc", which is a requirement for this module, it can be installed from source at - "https://github.com/lxc/python2-lxc" + "https://github.com/lxc/python2-lxc" or installed via pip using the package + name lxc-python2. """ EXAMPLES = """ From ffdb8d9eb479b6b47090d4a9173f920a76facbbd Mon Sep 17 00:00:00 2001 From: Q Date: Tue, 2 Jun 2015 13:32:22 +1000 Subject: [PATCH 505/720] patch module: 'backup_copy' parameter renamed to 'backup' --- files/patch.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/files/patch.py b/files/patch.py index 0932ed3556a..085784e7de5 100644 --- a/files/patch.py +++ b/files/patch.py @@ -65,7 +65,7 @@ options: required: false type: "int" default: "0" - backup_copy: + backup: description: - passes --backup --version-control=numbered to patch, producing numbered backup copies @@ -133,8 +133,9 @@ def main(): 'basedir': {}, 'strip': {'default': 0, 'type': 'int'}, 'remote_src': {'default': False, 'type': 'bool'}, - # don't call it "backup" since the semantics differs from the default one - 'backup_copy': { 'default': False, 'type': 'bool' } + # NB: for 'backup' parameter, semantics is slightly different from standard + # since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~") + 'backup': { 'default': False, 'type': 'bool' } }, required_one_of=[['dest', 'basedir']], supports_check_mode=True @@ -168,7 +169,7 @@ def main(): if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip): try: apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip, - dry_run=module.check_mode, backup=p.backup_copy ) + dry_run=module.check_mode, backup=p.backup ) changed = True except PatchError, e: module.fail_json(msg=str(e)) From 1c3afeadfc3a68173b652a8c0bf08646cf3ac1ab Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Tue, 2 Jun 2015 09:25:55 +0100 Subject: [PATCH 506/720] Add GPL notices --- cloud/webfaction/webfaction_app.py | 21 ++++++++++++++++++++- cloud/webfaction/webfaction_db.py | 23 +++++++++++++++++++++-- cloud/webfaction/webfaction_domain.py | 21 ++++++++++++++++++++- cloud/webfaction/webfaction_mailbox.py | 20 +++++++++++++++++++- cloud/webfaction/webfaction_site.py | 21 ++++++++++++++++++++- 5 files changed, 100 insertions(+), 6 deletions(-) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index 20e94a7b5f6..55599bdcca6 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -1,10 +1,29 @@ #! /usr/bin/python +# # Create a Webfaction application using Ansible and the Webfaction API # # Valid application types can be found by looking here: # http://docs.webfaction.com/xmlrpc-api/apps.html#application-types # -# Quentin Stafford-Fraser 2015 +# ------------------------------------------ +# +# (c) Quentin Stafford-Fraser 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# DOCUMENTATION = ''' --- diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index 784477c5409..a9ef88b943e 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -1,7 +1,26 @@ #! /usr/bin/python -# Create webfaction database using Ansible and the Webfaction API # -# Quentin Stafford-Fraser 2015 +# Create a webfaction database using Ansible and the Webfaction API +# +# ------------------------------------------ +# +# (c) Quentin Stafford-Fraser 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# DOCUMENTATION = ''' --- diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index c99a0f23f6d..f2c95897bc5 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -1,7 +1,26 @@ #! /usr/bin/python +# # Create Webfaction domains and subdomains using Ansible and the Webfaction API # -# Quentin Stafford-Fraser 2015 +# ------------------------------------------ +# +# (c) Quentin Stafford-Fraser 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# DOCUMENTATION = ''' --- diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index 87ca1fd1a26..976a428f3d3 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -1,7 +1,25 @@ #! /usr/bin/python +# # Create webfaction mailbox using Ansible and the Webfaction API # -# Quentin Stafford-Fraser and Andy Baker 2015 +# ------------------------------------------ +# (c) Quentin Stafford-Fraser and Andy Baker 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# DOCUMENTATION = ''' --- diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py index a5be4f5407b..223458faf46 100644 --- a/cloud/webfaction/webfaction_site.py +++ b/cloud/webfaction/webfaction_site.py @@ -1,7 +1,26 @@ #! /usr/bin/python +# # Create Webfaction website using Ansible and the Webfaction API # -# Quentin Stafford-Fraser 2015 +# ------------------------------------------ +# +# (c) Quentin Stafford-Fraser 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# DOCUMENTATION = ''' --- From 739defc595bb7769aef1627e09be50784b14189e Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Tue, 2 Jun 2015 16:26:32 +0600 Subject: [PATCH 507/720] Added proxmox_template module --- cloud/misc/proxmox_template.py | 245 +++++++++++++++++++++++++++++++++ 1 file changed, 245 insertions(+) create mode 100644 cloud/misc/proxmox_template.py diff --git a/cloud/misc/proxmox_template.py b/cloud/misc/proxmox_template.py new file mode 100644 index 00000000000..d07a406122c --- /dev/null +++ b/cloud/misc/proxmox_template.py @@ -0,0 +1,245 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: proxmox_template +short_description: management of OS templates in Proxmox VE cluster +description: + - allows you to list/upload/delete templates in Proxmox VE cluster +version_added: "2.0" +options: + api_host: + description: + - the host of the Proxmox VE cluster + required: true + api_user: + description: + - the user to authenticate with + required: true + api_password: + description: + - the password to authenticate with + - you can use PROXMOX_PASSWORD environment variable + default: null + required: false + https_verify_ssl: + description: + - enable / disable https certificate verification + default: false + required: false + type: boolean + node: + description: + - Proxmox VE node, when you will operate with template + default: null + required: true + src: + description: + - path to uploaded file + - required only for C(state=present) + default: null + required: false + aliases: ['path'] + template: + description: + - the template name + - required only for states C(absent), C(info) + default: null + required: false + content_type: + description: + - content type + - required only for C(state=present) + default: 'vztmpl' + required: false + choices: ['vztmpl', 'iso'] + storage: + description: + - target storage + default: 'local' + required: false + type: string + timeout: + description: + - timeout for operations + default: 300 + required: false + type: integer + force: + description: + - can be used only with C(state=present), exists template will be overwritten + default: false + required: false + type: boolean + state: + description: + - Indicate desired state of the template + choices: ['present', 'absent', 'list'] + default: present +notes: + - Requires proxmoxer and requests modules on host. This modules can be installed with pip. 
+requirements: [ "proxmoxer", "requests" ] +author: "Sergei Antipov @UnderGreen" +''' + +EXAMPLES = ''' +# Upload new openvz template with minimal options +- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz' + +# Upload new openvz template with minimal options use environment PROXMOX_PASSWORD variable(you should export it before) +- proxmox_template: node='uk-mc02' api_user='root@pam' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz' + +# Upload new openvz template with all options and force overwrite +- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' storage='local' content_type='vztmpl' src='~/ubuntu-14.04-x86_64.tar.gz' force=yes + +# Delete template with minimal options +- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' template='ubuntu-14.04-x86_64.tar.gz' state=absent + +# List content of storage(it returns list of dicts) +- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' storage='local' state=list +''' + +import os +import time + +try: + from proxmoxer import ProxmoxAPI + HAS_PROXMOXER = True +except ImportError: + HAS_PROXMOXER = False + +def get_template(proxmox, node, storage, content_type, template): + return [ True for tmpl in proxmox.nodes(node).storage(storage).content.get() + if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template) ] + +def get_content(proxmox, node, storage): + return proxmox.nodes(node).storage(storage).content.get() + +def upload_template(module, proxmox, node, storage, content_type, realpath, timeout): + taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath)) + while timeout: + task_status = proxmox.nodes(node).tasks(taskid).status.get() + if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK': + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for uploading template. 
Last line in task before timeout: %s' + % proxmox.node(node).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + +def delete_template(module, proxmox, node, storage, content_type, template, timeout): + volid = '%s:%s/%s' % (storage, content_type, template) + proxmox.nodes(node).storage(storage).content.delete(volid) + while timeout: + if not get_template(proxmox, node, storage, content_type, template): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for deleting template.') + + time.sleep(1) + return False + +def main(): + module = AnsibleModule( + argument_spec = dict( + api_host = dict(required=True), + api_user = dict(required=True), + api_password = dict(no_log=True), + https_verify_ssl = dict(type='bool', choices=BOOLEANS, default='no'), + node = dict(), + src = dict(), + template = dict(), + content_type = dict(default='vztmpl', choices=['vztmpl','iso']), + storage = dict(default='local'), + timeout = dict(type='int', default=300), + force = dict(type='bool', choices=BOOLEANS, default='no'), + state = dict(default='present', choices=['present', 'absent', 'list']), + ) + ) + + if not HAS_PROXMOXER: + module.fail_json(msg='proxmoxer required for this module') + + state = module.params['state'] + api_user = module.params['api_user'] + api_host = module.params['api_host'] + api_password = module.params['api_password'] + https_verify_ssl = module.params['https_verify_ssl'] + node = module.params['node'] + storage = module.params['storage'] + timeout = module.params['timeout'] + + # If password not set get it from PROXMOX_PASSWORD env + if not api_password: + try: + api_password = os.environ['PROXMOX_PASSWORD'] + except KeyError, e: + module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') + + try: + proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=https_verify_ssl) + except Exception, e: + module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) + + if state == 'present': + try: + content_type = module.params['content_type'] + src = module.params['src'] + + from ansible import utils + realpath = utils.path_dwim(None, src) + template = os.path.basename(realpath) + if get_template(proxmox, node, storage, content_type, template) and not module.params['force']: + module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template)) + elif not src: + module.fail_json(msg='src param to uploading template file is mandatory') + elif not (os.path.exists(realpath) and os.path.isfile(realpath)): + module.fail_json(msg='template file on path %s not exists' % realpath) + + if upload_template(module, proxmox, node, storage, content_type, realpath, timeout): + module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) + except Exception, e: + module.fail_json(msg="uploading of template %s failed with exception: %s" % ( template, e )) + + elif state == 'absent': + try: + content_type = module.params['content_type'] + template = module.params['template'] + + if not template: + module.fail_json(msg='template param is mandatory') + elif not get_template(proxmox, node, storage, content_type, template): + module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template)) + + if delete_template(module, proxmox, node, storage, content_type, template, timeout): + 
module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template)) + except Exception, e: + module.fail_json(msg="deleting of template %s failed with exception: %s" % ( template, e )) + + elif state == 'list': + try: + + module.exit_json(changed=False, templates=get_content(proxmox, node, storage)) + except Exception, e: + module.fail_json(msg="listing of templates %s failed with exception: %s" % ( template, e )) + +# import module snippets +from ansible.module_utils.basic import * +main() From 282393c27b57ea63d649531847674f1863860648 Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Tue, 2 Jun 2015 18:21:36 +0600 Subject: [PATCH 508/720] proxmox_template | fixed problem with uploading --- cloud/misc/proxmox_template.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/misc/proxmox_template.py b/cloud/misc/proxmox_template.py index d07a406122c..b1d94d96234 100644 --- a/cloud/misc/proxmox_template.py +++ b/cloud/misc/proxmox_template.py @@ -129,10 +129,10 @@ def get_template(proxmox, node, storage, content_type, template): def get_content(proxmox, node, storage): return proxmox.nodes(node).storage(storage).content.get() -def upload_template(module, proxmox, node, storage, content_type, realpath, timeout): +def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout): taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath)) while timeout: - task_status = proxmox.nodes(node).tasks(taskid).status.get() + task_status = proxmox.nodes(api_host.split('.')[0]).tasks(taskid).status.get() if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK': return True timeout = timeout - 1 @@ -213,7 +213,7 @@ def main(): elif not (os.path.exists(realpath) and os.path.isfile(realpath)): module.fail_json(msg='template file on path %s not exists' % realpath) - if upload_template(module, proxmox, node, storage, content_type, realpath, timeout): + if upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout): module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) except Exception, e: module.fail_json(msg="uploading of template %s failed with exception: %s" % ( template, e )) From 5da651212ff84c250a3782830e3fb2ca48003dd6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 08:37:45 -0400 Subject: [PATCH 509/720] push list nature of tags into spec to allow both for comma delimited strings and actual lists --- monitoring/datadog_event.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index 1d6a98dc9c3..90cbccc9593 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -86,7 +86,7 @@ def main(): priority=dict( required=False, default='normal', choices=['normal', 'low'] ), - tags=dict(required=False, default=None), + tags=dict(required=False, default=None, type='list'), alert_type=dict( required=False, default='info', choices=['error', 'warning', 'info', 'success'] @@ -116,7 +116,7 @@ def post_event(module): if module.params['date_happened'] != None: body['date_happened'] = module.params['date_happened'] if module.params['tags'] != None: - body['tags'] = module.params['tags'].split(",") + body['tags'] = module.params['tags'] if module.params['aggregation_key'] != None: body['aggregation_key'] = module.params['aggregation_key'] if 
module.params['source_type_name'] != None: From b8df0da2308cafb18d9a492615888df4d596ce7f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 08:48:20 -0400 Subject: [PATCH 510/720] added version added to patch's bacukp --- files/patch.py | 1 + 1 file changed, 1 insertion(+) diff --git a/files/patch.py b/files/patch.py index 085784e7de5..c1a61ce733f 100644 --- a/files/patch.py +++ b/files/patch.py @@ -66,6 +66,7 @@ options: type: "int" default: "0" backup: + version_added: "2.0" description: - passes --backup --version-control=numbered to patch, producing numbered backup copies From af7463e46e6d24777ea2934ddf05d8710c16de73 Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Tue, 2 Jun 2015 22:26:32 +0600 Subject: [PATCH 511/720] proxmox_template | changed http_verify_ssl to validate_certs --- cloud/misc/proxmox_template.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/misc/proxmox_template.py b/cloud/misc/proxmox_template.py index b1d94d96234..4bf71f62b12 100644 --- a/cloud/misc/proxmox_template.py +++ b/cloud/misc/proxmox_template.py @@ -36,7 +36,7 @@ options: - you can use PROXMOX_PASSWORD environment variable default: null required: false - https_verify_ssl: + validate_certs: description: - enable / disable https certificate verification default: false @@ -162,7 +162,7 @@ def main(): api_host = dict(required=True), api_user = dict(required=True), api_password = dict(no_log=True), - https_verify_ssl = dict(type='bool', choices=BOOLEANS, default='no'), + validate_certs = dict(type='bool', choices=BOOLEANS, default='no'), node = dict(), src = dict(), template = dict(), @@ -181,7 +181,7 @@ def main(): api_user = module.params['api_user'] api_host = module.params['api_host'] api_password = module.params['api_password'] - https_verify_ssl = module.params['https_verify_ssl'] + validate_certs = module.params['validate_certs'] node = module.params['node'] storage = module.params['storage'] timeout = module.params['timeout'] @@ -194,7 +194,7 @@ def main(): module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') try: - proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=https_verify_ssl) + proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) except Exception, e: module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) From 51f9225754c42007ba9633ea6ab63765048fe054 Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Tue, 2 Jun 2015 22:29:19 +0600 Subject: [PATCH 512/720] proxmox_template | deleted state=list and changed default timeout to 30 --- cloud/misc/proxmox_template.py | 23 +++++------------------ 1 file changed, 5 insertions(+), 18 deletions(-) diff --git a/cloud/misc/proxmox_template.py b/cloud/misc/proxmox_template.py index 4bf71f62b12..7fed47f7260 100644 --- a/cloud/misc/proxmox_template.py +++ b/cloud/misc/proxmox_template.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' module: proxmox_template short_description: management of OS templates in Proxmox VE cluster description: - - allows you to list/upload/delete templates in Proxmox VE cluster + - allows you to upload/delete templates in Proxmox VE cluster version_added: "2.0" options: api_host: @@ -76,7 +76,7 @@ options: timeout: description: - timeout for operations - default: 300 + default: 30 required: false type: integer force: @@ -88,7 +88,7 @@ options: state: description: - Indicate desired state of the template - choices: ['present', 'absent', 
'list'] + choices: ['present', 'absent'] default: present notes: - Requires proxmoxer and requests modules on host. This modules can be installed with pip. @@ -108,9 +108,6 @@ EXAMPLES = ''' # Delete template with minimal options - proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' template='ubuntu-14.04-x86_64.tar.gz' state=absent - -# List content of storage(it returns list of dicts) -- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' storage='local' state=list ''' import os @@ -126,9 +123,6 @@ def get_template(proxmox, node, storage, content_type, template): return [ True for tmpl in proxmox.nodes(node).storage(storage).content.get() if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template) ] -def get_content(proxmox, node, storage): - return proxmox.nodes(node).storage(storage).content.get() - def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout): taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath)) while timeout: @@ -168,9 +162,9 @@ def main(): template = dict(), content_type = dict(default='vztmpl', choices=['vztmpl','iso']), storage = dict(default='local'), - timeout = dict(type='int', default=300), + timeout = dict(type='int', default=30), force = dict(type='bool', choices=BOOLEANS, default='no'), - state = dict(default='present', choices=['present', 'absent', 'list']), + state = dict(default='present', choices=['present', 'absent']), ) ) @@ -233,13 +227,6 @@ def main(): except Exception, e: module.fail_json(msg="deleting of template %s failed with exception: %s" % ( template, e )) - elif state == 'list': - try: - - module.exit_json(changed=False, templates=get_content(proxmox, node, storage)) - except Exception, e: - module.fail_json(msg="listing of templates %s failed with exception: %s" % ( template, e )) - # import module snippets from ansible.module_utils.basic import * main() From e337b67cf12f382cf9420fa4d5c7a7ab5b9cad88 Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Tue, 2 Jun 2015 22:53:47 +0600 Subject: [PATCH 513/720] proxmox | changed https_verify_ssl to to validate_certs and added forgotten return --- cloud/misc/proxmox.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloud/misc/proxmox.py b/cloud/misc/proxmox.py index f3ee1962891..7be4361edbe 100644 --- a/cloud/misc/proxmox.py +++ b/cloud/misc/proxmox.py @@ -41,7 +41,7 @@ options: - the instance id default: null required: true - https_verify_ssl: + validate_certs: description: - enable / disable https certificate verification default: false @@ -219,6 +219,7 @@ def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, sw % proxmox_node.tasks(taskid).log.get()[:1]) time.sleep(1) + return False def start_instance(module, proxmox, vm, vmid, timeout): taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.start.post() @@ -272,7 +273,7 @@ def main(): api_user = dict(required=True), api_password = dict(no_log=True), vmid = dict(required=True), - https_verify_ssl = dict(type='bool', choices=BOOLEANS, default='no'), + validate_certs = dict(type='bool', choices=BOOLEANS, default='no'), node = dict(), password = dict(no_log=True), hostname = dict(), @@ -302,7 +303,7 @@ def main(): api_host = module.params['api_host'] api_password = module.params['api_password'] vmid = module.params['vmid'] - https_verify_ssl = module.params['https_verify_ssl'] + validate_certs = 
module.params['validate_certs'] node = module.params['node'] disk = module.params['disk'] cpus = module.params['cpus'] @@ -319,7 +320,7 @@ def main(): module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') try: - proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=https_verify_ssl) + proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) except Exception, e: module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) From 198e77e5fb519f92c72ca6ab514bbea922d30248 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 14:11:51 -0400 Subject: [PATCH 514/720] corrected lvol docs version to 2.0 --- system/lvol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index d807f9e8336..3225408d162 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -58,7 +58,7 @@ options: that filesystems get never corrupted/destroyed by mistake. required: false opts: - version_added: "1.9" + version_added: "2.0" description: - Free-form options to be passed to the lvcreate command notes: From 328b133a33446123558d34a0e35164b23929a11e Mon Sep 17 00:00:00 2001 From: Roman Vyakhirev Date: Wed, 3 Jun 2015 01:57:15 +0300 Subject: [PATCH 515/720] composer module. ignore_platform_reqs option added. --- packaging/language/composer.py | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/packaging/language/composer.py b/packaging/language/composer.py index 5bbd948595a..cfe3f99b9e7 100644 --- a/packaging/language/composer.py +++ b/packaging/language/composer.py @@ -82,6 +82,14 @@ options: default: "yes" choices: [ "yes", "no" ] aliases: [ "optimize-autoloader" ] + ignore_platform_reqs: + version_added: "2.0" + description: + - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these. 
+ required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [ "ignore-platform-reqs" ] requirements: - php - composer installed in bin path (recommended /usr/local/bin) @@ -116,14 +124,15 @@ def composer_install(module, command, options): def main(): module = AnsibleModule( argument_spec = dict( - command = dict(default="install", type="str", required=False), - working_dir = dict(aliases=["working-dir"], required=True), - prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]), - prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]), - no_dev = dict(default="yes", type="bool", aliases=["no-dev"]), - no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]), - no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]), - optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]), + command = dict(default="install", type="str", required=False), + working_dir = dict(aliases=["working-dir"], required=True), + prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]), + prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]), + no_dev = dict(default="yes", type="bool", aliases=["no-dev"]), + no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]), + no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]), + optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]), + ignore_platform_reqs = dict(default="no", type="bool", aliases=["ignore-platform-reqs"]), ), supports_check_mode=True ) @@ -153,6 +162,8 @@ def main(): options.append('--no-plugins') if module.params['optimize_autoloader']: options.append('--optimize-autoloader') + if module.params['ignore_platform_reqs']: + options.append('--ignore-platform-reqs') if module.check_mode: options.append('--dry-run') From a0905a9d5ecf9101288080324483fb8ca56f87ba Mon Sep 17 00:00:00 2001 From: Etienne CARRIERE Date: Wed, 3 Jun 2015 08:22:18 +0200 Subject: [PATCH 516/720] Factor common functions for F5 modules --- network/f5/bigip_monitor_http.py | 61 ++++++------------------------ network/f5/bigip_monitor_tcp.py | 64 +++++++------------------------- network/f5/bigip_node.py | 52 +++++--------------------- network/f5/bigip_pool.py | 56 ++++++---------------------- network/f5/bigip_pool_member.py | 54 ++++++--------------------- 5 files changed, 58 insertions(+), 229 deletions(-) diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py index 6a31afb2ee7..5299bdb0f44 100644 --- a/network/f5/bigip_monitor_http.py +++ b/network/f5/bigip_monitor_http.py @@ -163,35 +163,10 @@ EXAMPLES = ''' name: "{{ monitorname }}" ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - TEMPLATE_TYPE = 'TTYPE_HTTP' DEFAULT_PARENT_TYPE = 'http' -# =========================================== -# bigip_monitor module generic methods. -# these should be re-useable for other monitor types -# - -def bigip_api(bigip, user, password): - - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - - -def disable_ssl_cert_validation(): - - # You probably only want to do this for testing and never in production. 
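Patch 516 replaces the boilerplate being deleted here (the bigsuds import guard, bigip_api(), disable_ssl_cert_validation(), and the duplicated connection options) with shared helpers imported from ansible.module_utils.f5, which are not shown in this series. As a rough sketch of what f5_argument_spec() has to provide, inferred from the options each module stops declaring in the main() hunks further down (the option names and defaults come from those deleted lines; the function body itself is an assumption, not the shared code):

def f5_argument_spec():
    # Connection and placement options shared by all F5 modules, matching
    # the per-module argument_spec entries removed in this refactor.
    return dict(
        server=dict(type='str', required=True),
        user=dict(type='str', required=True),
        password=dict(type='str', required=True),
        validate_certs=dict(default='yes', type='bool'),
        partition=dict(type='str', default='Common'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )

Each module then extends this dict with its own monitor-, node- or pool-specific options, as the updated main() functions below show.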
- # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context - def check_monitor_exists(module, api, monitor, parent): @@ -278,7 +253,6 @@ def set_integer_property(api, monitor, int_property): def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): - changed = False for str_property in template_string_properties: if str_property['value'] is not None and not check_string_property(api, monitor, str_property): @@ -321,15 +295,8 @@ def set_ipport(api, monitor, ipport): def main(): # begin monitor specific stuff - - module = AnsibleModule( - argument_spec = dict( - server = dict(required=True), - user = dict(required=True), - password = dict(required=True), - validate_certs = dict(default='yes', type='bool'), - partition = dict(default='Common'), - state = dict(default='present', choices=['present', 'absent']), + argument_spec=f5_argument_spec(); + argument_spec.update( dict( name = dict(required=True), parent = dict(default=DEFAULT_PARENT_TYPE), parent_partition = dict(default='Common'), @@ -341,20 +308,20 @@ def main(): interval = dict(required=False, type='int'), timeout = dict(required=False, type='int'), time_until_up = dict(required=False, type='int', default=0) - ), + ) + ) + + module = AnsibleModule( + argument_spec = argument_spec, supports_check_mode=True ) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - partition = module.params['partition'] + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) + parent_partition = module.params['parent_partition'] - state = module.params['state'] name = module.params['name'] - parent = "/%s/%s" % (parent_partition, module.params['parent']) - monitor = "/%s/%s" % (partition, name) + parent = fq_name(parent_partition, module.params['parent']) + monitor = fq_name(partition, name) send = module.params['send'] receive = module.params['receive'] receive_disable = module.params['receive_disable'] @@ -366,11 +333,6 @@ def main(): # end monitor specific stuff - if not validate_certs: - disable_ssl_cert_validation() - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") api = bigip_api(server, user, password) monitor_exists = check_monitor_exists(module, api, monitor, parent) @@ -481,5 +443,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py index d5855e0f15d..b5f58da8397 100644 --- a/network/f5/bigip_monitor_tcp.py +++ b/network/f5/bigip_monitor_tcp.py @@ -181,37 +181,11 @@ EXAMPLES = ''' ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP' TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open'] DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower() -# =========================================== -# bigip_monitor module generic methods. -# these should be re-useable for other monitor types -# - -def bigip_api(bigip, user, password): - - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - - -def disable_ssl_cert_validation(): - - # You probably only want to do this for testing and never in production. 
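Likewise, f5_parse_arguments() is only visible in these diffs through the tuple the refactored main() functions unpack from it. A minimal sketch consistent with that call site (the body is an assumption; the real shared helper presumably also takes over the certificate-validation opt-out and the bigsuds availability check that the deleted per-module boilerplate used to perform):

def f5_parse_arguments(module):
    # Return the shared connection parameters in the order the refactored
    # modules unpack them:
    # (server, user, password, state, partition, validate_certs)
    p = module.params
    return (p['server'], p['user'], p['password'],
            p['state'], p['partition'], p['validate_certs'])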
- # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context - - def check_monitor_exists(module, api, monitor, parent): # hack to determine if monitor exists @@ -234,7 +208,7 @@ def check_monitor_exists(module, api, monitor, parent): def create_monitor(api, monitor, template_attributes): - try: + try: api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes]) except bigsuds.OperationFailed, e: if "already exists" in str(e): @@ -298,7 +272,6 @@ def set_integer_property(api, monitor, int_property): def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): - changed = False for str_property in template_string_properties: if str_property['value'] is not None and not check_string_property(api, monitor, str_property): @@ -341,15 +314,8 @@ def set_ipport(api, monitor, ipport): def main(): # begin monitor specific stuff - - module = AnsibleModule( - argument_spec = dict( - server = dict(required=True), - user = dict(required=True), - password = dict(required=True), - validate_certs = dict(default='yes', type='bool'), - partition = dict(default='Common'), - state = dict(default='present', choices=['present', 'absent']), + argument_spec=f5_argument_spec(); + argument_spec.update(dict( name = dict(required=True), type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES), parent = dict(default=DEFAULT_PARENT), @@ -361,21 +327,21 @@ def main(): interval = dict(required=False, type='int'), timeout = dict(required=False, type='int'), time_until_up = dict(required=False, type='int', default=0) - ), + ) + ) + + module = AnsibleModule( + argument_spec = argument_spec, supports_check_mode=True ) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - partition = module.params['partition'] + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) + parent_partition = module.params['parent_partition'] - state = module.params['state'] name = module.params['name'] type = 'TTYPE_' + module.params['type'].upper() - parent = "/%s/%s" % (parent_partition, module.params['parent']) - monitor = "/%s/%s" % (partition, name) + parent = fq_name(parent_partition, module.params['parent']) + monitor = fq_name(partition, name) send = module.params['send'] receive = module.params['receive'] ip = module.params['ip'] @@ -390,11 +356,6 @@ def main(): # end monitor specific stuff - if not validate_certs: - disable_ssl_cert_validation() - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") api = bigip_api(server, user, password) monitor_exists = check_monitor_exists(module, api, monitor, parent) @@ -506,5 +467,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index 31e34fdeb47..49f721aa8c5 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -188,27 +188,6 @@ EXAMPLES = ''' ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# ========================== -# bigip_node module specific -# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - -def 
disable_ssl_cert_validation(): - # You probably only want to do this for testing and never in production. - # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context - def node_exists(api, address): # hack to determine if node exists result = False @@ -283,42 +262,30 @@ def get_node_monitor_status(api, name): def main(): - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - validate_certs = dict(default='yes', type='bool'), - state = dict(type='str', default='present', choices=['present', 'absent']), + argument_spec=f5_argument_spec(); + argument_spec.update(dict( session_state = dict(type='str', choices=['enabled', 'disabled']), monitor_state = dict(type='str', choices=['enabled', 'disabled']), - partition = dict(type='str', default='Common'), name = dict(type='str', required=True), host = dict(type='str', aliases=['address', 'ip']), description = dict(type='str') - ), + ) + ) + + module = AnsibleModule( + argument_spec = argument_spec, supports_check_mode=True ) - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - state = module.params['state'] session_state = module.params['session_state'] monitor_state = module.params['monitor_state'] - partition = module.params['partition'] host = module.params['host'] name = module.params['name'] - address = "/%s/%s" % (partition, name) + address = fq_name(partition, name) description = module.params['description'] - if not validate_certs: - disable_ssl_cert_validation() - if state == 'absent' and host is not None: module.fail_json(msg="host parameter invalid when state=absent") @@ -410,5 +377,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py index 2eaaf8f3a34..4d8d599134e 100644 --- a/network/f5/bigip_pool.py +++ b/network/f5/bigip_pool.py @@ -228,27 +228,6 @@ EXAMPLES = ''' ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# =========================================== -# bigip_pool module specific support methods. -# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - -def disable_ssl_cert_validation(): - # You probably only want to do this for testing and never in production. 
- # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context - def pool_exists(api, pool): # hack to determine if pool exists result = False @@ -368,15 +347,9 @@ def main(): service_down_choices = ['none', 'reset', 'drop', 'reselect'] - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - validate_certs = dict(default='yes', type='bool'), - state = dict(type='str', default='present', choices=['present', 'absent']), + argument_spec=f5_argument_spec(); + argument_spec.update(dict( name = dict(type='str', required=True, aliases=['pool']), - partition = dict(type='str', default='Common'), lb_method = dict(type='str', choices=lb_method_choices), monitor_type = dict(type='str', choices=monitor_type_choices), quorum = dict(type='int'), @@ -385,21 +358,18 @@ def main(): service_down_action = dict(type='str', choices=service_down_choices), host = dict(type='str', aliases=['address']), port = dict(type='int') - ), + ) + ) + + module = AnsibleModule( + argument_spec = argument_spec, supports_check_mode=True ) - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - state = module.params['state'] name = module.params['name'] - partition = module.params['partition'] - pool = "/%s/%s" % (partition, name) + pool = fq_name(partition,name) lb_method = module.params['lb_method'] if lb_method: lb_method = lb_method.lower() @@ -411,16 +381,13 @@ def main(): if monitors: monitors = [] for monitor in module.params['monitors']: - if "/" not in monitor: - monitors.append("/%s/%s" % (partition, monitor)) - else: - monitors.append(monitor) + monitors.append(fq_name(partition, monitor)) slow_ramp_time = module.params['slow_ramp_time'] service_down_action = module.params['service_down_action'] if service_down_action: service_down_action = service_down_action.lower() host = module.params['host'] - address = "/%s/%s" % (partition, host) + address = fq_name(partition,host) port = module.params['port'] if not validate_certs: @@ -551,5 +518,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index bc4b7be2f7b..1d59462023f 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -196,27 +196,6 @@ EXAMPLES = ''' ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# =========================================== -# bigip_pool_member module specific support methods. -# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - -def disable_ssl_cert_validation(): - # You probably only want to do this for testing and never in production. 
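fq_name() is the third helper pulled in from ansible.module_utils.f5. Judging by the expressions it replaces ("/%s/%s" % (partition, name)) and by the dropped per-monitor "/" check in bigip_pool above, a plausible implementation is along these lines (an assumption, not the shared code itself):

def fq_name(partition, name):
    # Expand a bare object name to /partition/name, passing through names
    # that already carry a partition prefix (this mirrors the removed
    # "/" check in bigip_pool's monitor handling).
    if '/' in name:
        return name
    return '/%s/%s' % (partition, name)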
- # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context - def pool_exists(api, pool): # hack to determine if pool exists result = False @@ -327,49 +306,37 @@ def get_member_monitor_status(api, pool, address, port): return result def main(): - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - validate_certs = dict(default='yes', type='bool'), - state = dict(type='str', default='present', choices=['present', 'absent']), + argument_spec = f5_argument_spec(); + argument_spec.update(dict( session_state = dict(type='str', choices=['enabled', 'disabled']), monitor_state = dict(type='str', choices=['enabled', 'disabled']), pool = dict(type='str', required=True), - partition = dict(type='str', default='Common'), host = dict(type='str', required=True, aliases=['address', 'name']), port = dict(type='int', required=True), connection_limit = dict(type='int'), description = dict(type='str'), rate_limit = dict(type='int'), ratio = dict(type='int') - ), - supports_check_mode=True + ) ) - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") + module = AnsibleModule( + argument_spec = argument_spec, + supports_check_mode=True + ) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - state = module.params['state'] + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) session_state = module.params['session_state'] monitor_state = module.params['monitor_state'] - partition = module.params['partition'] - pool = "/%s/%s" % (partition, module.params['pool']) + pool = fq_name(partition, module.params['pool']) connection_limit = module.params['connection_limit'] description = module.params['description'] rate_limit = module.params['rate_limit'] ratio = module.params['ratio'] host = module.params['host'] - address = "/%s/%s" % (partition, host) + address = fq_name(partition, host) port = module.params['port'] - if not validate_certs: - disable_ssl_cert_validation() # sanity check user supplied values @@ -457,5 +424,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() From 9ee29fa5798ca9149b78a01cfcfa6a0ce61114f4 Mon Sep 17 00:00:00 2001 From: Sebastian Kornehl Date: Wed, 3 Jun 2015 13:15:59 +0200 Subject: [PATCH 517/720] Added datadog_monitor module --- monitoring/datadog_monitor.py | 278 ++++++++++++++++++++++++++++++++++ 1 file changed, 278 insertions(+) create mode 100644 monitoring/datadog_monitor.py diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py new file mode 100644 index 00000000000..b5ad2d2d6d6 --- /dev/null +++ b/monitoring/datadog_monitor.py @@ -0,0 +1,278 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Sebastian Kornehl +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# import module snippets + +# Import Datadog +try: + from datadog import initialize, api + HAS_DATADOG = True +except: + HAS_DATADOG = False + +DOCUMENTATION = ''' +--- +module: datadog_monitor +short_description: Manages Datadog monitors +description: +- "Manages monitors within Datadog" +- "Options like described on http://docs.datadoghq.com/api/" +version_added: "2.0" +author: '"Sebastian Kornehl" ' +notes: [] +requirements: [datadog] +options: + api_key: + description: ["Your DataDog API key."] + required: true + default: null + app_key: + description: ["Your DataDog app key."] + required: true + default: null + state: + description: ["The designated state of the monitor."] + required: true + default: null + choices: ['present', 'absent', 'muted', 'unmuted'] + type: + description: ["The type of the monitor."] + required: false + default: null + choices: ['metric alert', 'service check'] + query: + description: ["he monitor query to notify on with syntax varying depending on what type of monitor you are creating."] + required: false + default: null + name: + description: ["The name of the alert."] + required: true + default: null + message: + description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events."] + required: false + default: null + silenced: + description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "] + required: false + default: "" + notify_no_data: + description: ["A boolean indicating whether this monitor will notify when data stops reporting.."] + required: false + default: False + no_data_timeframe: + description: ["The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks."] + required: false + default: 2x timeframe for metric, 2 minutes for service + timeout_h: + description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."] + required: false + default: null + renotify_interval: + description: ["The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's not resolved."] + required: false + default: null + escalation_message: + description: ["A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval is None"] + required: false + default: null + notify_audit: + description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."] + required: false + default: False + thresholds: + description: ["A dictionary of thresholds by status. Because service checks can have multiple thresholds, we don't define them directly in the query."] + required: false + default: {'ok': 1, 'critical': 1, 'warning': 1} +''' + +EXAMPLES = ''' +# Create a metric monitor +datadog_monitor: + type: "metric alert" + name: "Test monitor" + state: "present" + query: "datadog.agent.up".over("host:host1").last(2).count_by_status()" + message: "Some message." 
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Deletes a monitor +datadog_monitor: + name: "Test monitor" + state: "absent" + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Mutes a monitor +datadog_monitor: + name: "Test monitor" + state: "mute" + silenced: '{"*":None}' + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Unmutes a monitor +datadog_monitor: + name: "Test monitor" + state: "unmute" + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" +''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True), + app_key=dict(required=True), + state=dict(required=True, choises=['present', 'absent', 'mute', 'unmute']), + type=dict(required=False, choises=['metric alert', 'service check']), + name=dict(required=True), + query=dict(required=False), + message=dict(required=False, default=None), + silenced=dict(required=False, default=None, type='dict'), + notify_no_data=dict(required=False, default=False, choices=BOOLEANS), + no_data_timeframe=dict(required=False, default=None), + timeout_h=dict(required=False, default=None), + renotify_interval=dict(required=False, default=None), + escalation_message=dict(required=False, default=None), + notify_audit=dict(required=False, default=False, choices=BOOLEANS), + thresholds=dict(required=False, type='dict', default={'ok': 1, 'critical': 1, 'warning': 1}), + ) + ) + + # Prepare Datadog + if not HAS_DATADOG: + module.fail_json(msg='datadogpy required for this module') + + options = { + 'api_key': module.params['api_key'], + 'app_key': module.params['app_key'] + } + + initialize(**options) + + if module.params['state'] == 'present': + install_monitor(module) + elif module.params['state'] == 'absent': + delete_monitor(module) + elif module.params['state'] == 'mute': + mute_monitor(module) + elif module.params['state'] == 'unmute': + unmute_monitor(module) + + +def _get_monitor(module): + for monitor in api.Monitor.get_all(): + if monitor['name'] == module.params['name']: + return monitor + return {} + + +def _post_monitor(module, options): + try: + msg = api.Monitor.create(type=module.params['type'], query=module.params['query'], + name=module.params['name'], message=module.params['message'], + options=options) + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +def _update_monitor(module, monitor, options): + try: + msg = api.Monitor.update(id=monitor['id'], query=module.params['query'], + name=module.params['name'], message=module.params['message'], + options=options) + if len(set(msg) - set(monitor)) == 0: + module.exit_json(changed=False, msg=msg) + else: + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +def install_monitor(module): + options = { + "silenced": module.params['silenced'], + "notify_no_data": module.boolean(module.params['notify_no_data']), + "no_data_timeframe": module.params['no_data_timeframe'], + "timeout_h": module.params['timeout_h'], + "renotify_interval": module.params['renotify_interval'], + "escalation_message": module.params['escalation_message'], + "notify_audit": module.boolean(module.params['notify_audit']), + } + + if module.params['type'] == "service check": + options["thresholds"] = module.params['thresholds'] + + monitor = _get_monitor(module) + if not monitor: + 
_post_monitor(module, options) + else: + _update_monitor(module, monitor, options) + + +def delete_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.exit_json(changed=False) + try: + msg = api.Monitor.delete(monitor['id']) + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +def mute_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.fail_json(msg="Monitor %s not found!" % module.params['name']) + elif monitor['options']['silenced']: + module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.") + elif (module.params['silenced'] is not None + and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0): + module.exit_json(changed=False) + try: + if module.params['silenced'] is None or module.params['silenced'] == "": + msg = api.Monitor.mute(id=monitor['id']) + else: + msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced']) + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +def unmute_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.fail_json(msg="Monitor %s not found!" % module.params['name']) + elif not monitor['options']['silenced']: + module.exit_json(changed=False) + try: + msg = api.Monitor.unmute(monitor['id']) + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() From ab8de7a3e7f6b62119b3c65f74f96ea06ab3572f Mon Sep 17 00:00:00 2001 From: Roman Vyakhirev Date: Thu, 4 Jun 2015 01:25:08 +0300 Subject: [PATCH 518/720] bower module. 
Non-interactive mode and allow-root moved to _exec, they should affect all commands --- packaging/language/bower.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/language/bower.py b/packaging/language/bower.py index 34284356f6e..8fbe20f7e0c 100644 --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -86,7 +86,7 @@ class Bower(object): def _exec(self, args, run_in_check_mode=False, check_rc=True): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = ["bower"] + args + cmd = ["bower"] + args + ['--config.interactive=false', '--allow-root'] if self.name: cmd.append(self.name_version) @@ -108,7 +108,7 @@ class Bower(object): return '' def list(self): - cmd = ['list', '--json', '--config.interactive=false', '--allow-root'] + cmd = ['list', '--json'] installed = list() missing = list() From df618c2d48b3348028e98e3e8de706d33d489050 Mon Sep 17 00:00:00 2001 From: Sebastian Kornehl Date: Thu, 4 Jun 2015 06:54:02 +0200 Subject: [PATCH 519/720] docs: removed default when required is true --- monitoring/datadog_monitor.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py index b5ad2d2d6d6..24de8af10ba 100644 --- a/monitoring/datadog_monitor.py +++ b/monitoring/datadog_monitor.py @@ -41,15 +41,12 @@ options: api_key: description: ["Your DataDog API key."] required: true - default: null app_key: description: ["Your DataDog app key."] required: true - default: null state: description: ["The designated state of the monitor."] required: true - default: null choices: ['present', 'absent', 'muted', 'unmuted'] type: description: ["The type of the monitor."] @@ -63,7 +60,6 @@ options: name: description: ["The name of the alert."] required: true - default: null message: description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events."] required: false From 80b1b3add239c58582bc71576a5666d81580bff0 Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Thu, 4 Jun 2015 22:17:16 +0100 Subject: [PATCH 520/720] Webfaction will create a default database user when db is created. For symmetry and repeatability, delete it when db is deleted. Add missing param to documentation. --- cloud/webfaction/webfaction_db.py | 48 ++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index a9ef88b943e..1a91d649458 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -4,7 +4,7 @@ # # ------------------------------------------ # -# (c) Quentin Stafford-Fraser 2015 +# (c) Quentin Stafford-Fraser and Andy Baker 2015 # # This file is part of Ansible # @@ -53,6 +53,12 @@ options: required: true choices: ['mysql', 'postgresql'] + password: + description: + - The password for the new database user. + required: false + default: None + login_name: description: - The webfaction account to use @@ -75,6 +81,10 @@ EXAMPLES = ''' type: mysql login_name: "{{webfaction_user}}" login_password: "{{webfaction_passwd}}" + + # Note that, for symmetry's sake, deleting a database using + # 'state: absent' will also delete the matching user. 
+ ''' import socket @@ -110,13 +120,17 @@ def main(): db_map = dict([(i['name'], i) for i in db_list]) existing_db = db_map.get(db_name) + user_list = webfaction.list_db_users(session_id) + user_map = dict([(i['username'], i) for i in user_list]) + existing_user = user_map.get(db_name) + result = {} # Here's where the real stuff happens if db_state == 'present': - # Does an app with this name already exist? + # Does an database with this name already exist? if existing_db: # Yes, but of a different type - fail if existing_db['db_type'] != db_type: @@ -129,8 +143,8 @@ def main(): if not module.check_mode: - # If this isn't a dry run, create the app - # print positional_args + # If this isn't a dry run, create the db + # and default user. result.update( webfaction.create_db( session_id, db_name, db_type, db_passwd @@ -139,17 +153,23 @@ def main(): elif db_state == 'absent': - # If the app's already not there, nothing changed. - if not existing_db: - module.exit_json( - changed = False, - ) - + # If this isn't a dry run... if not module.check_mode: - # If this isn't a dry run, delete the app - result.update( - webfaction.delete_db(session_id, db_name, db_type) - ) + + if not (existing_db or existing_user): + module.exit_json(changed = False,) + + if existing_db: + # Delete the db if it exists + result.update( + webfaction.delete_db(session_id, db_name, db_type) + ) + + if existing_user: + # Delete the default db user if it exists + result.update( + webfaction.delete_db_user(session_id, db_name, db_type) + ) else: module.fail_json(msg="Unknown state specified: {}".format(db_state)) From 84b9ab435de312ddac377cb2f57f52da0a28f04d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 11:25:27 -0400 Subject: [PATCH 521/720] minor docs update --- cloud/webfaction/webfaction_app.py | 2 +- cloud/webfaction/webfaction_db.py | 2 +- cloud/webfaction/webfaction_domain.py | 2 +- cloud/webfaction/webfaction_mailbox.py | 2 +- cloud/webfaction/webfaction_site.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index 55599bdcca6..3e42ec1265e 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -31,7 +31,7 @@ module: webfaction_app short_description: Add or remove applications on a Webfaction host description: - Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser +author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index 1a91d649458..f420490711c 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -28,7 +28,7 @@ module: webfaction_db short_description: Add or remove a database on Webfaction description: - Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. 
-author: Quentin Stafford-Fraser +author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index f2c95897bc5..0b35faf110f 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -28,7 +28,7 @@ module: webfaction_domain short_description: Add or remove domains and subdomains on Webfaction description: - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser +author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted. diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index 976a428f3d3..7547b6154e5 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -27,7 +27,7 @@ module: webfaction_mailbox short_description: Add or remove mailboxes on Webfaction description: - Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser +author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py index 223458faf46..57eae39c0dc 100644 --- a/cloud/webfaction/webfaction_site.py +++ b/cloud/webfaction/webfaction_site.py @@ -28,7 +28,7 @@ module: webfaction_site short_description: Add or remove a website on a Webfaction host description: - Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser +author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name. 
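The net effect of the webfaction_db change above: because Webfaction creates a default database user together with every database, state=absent now removes both, so a create/delete cycle leaves the account exactly as it started. Restated as a standalone helper for clarity (the function name is invented for illustration; the API calls and the check-mode guard are the ones from the diff):

def remove_db_and_default_user(module, webfaction, session_id,
                               db_name, db_type,
                               existing_db, existing_user, result):
    # state=absent: outside of a dry run, exit early when neither object
    # exists, otherwise delete the database and its default user.
    if not module.check_mode:
        if not (existing_db or existing_user):
            module.exit_json(changed=False)
        if existing_db:
            result.update(webfaction.delete_db(session_id, db_name, db_type))
        if existing_user:
            result.update(
                webfaction.delete_db_user(session_id, db_name, db_type))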
From ab8dbd90f9869b343573391c2639e17c15e10071 Mon Sep 17 00:00:00 2001 From: "jonathan.lestrelin" Date: Fri, 5 Jun 2015 18:18:48 +0200 Subject: [PATCH 522/720] Add pear packaging module to manage PHP PEAR an PECL packages --- packaging/language/pear.py | 230 +++++++++++++++++++++++++++++++++++++ 1 file changed, 230 insertions(+) create mode 100644 packaging/language/pear.py diff --git a/packaging/language/pear.py b/packaging/language/pear.py new file mode 100644 index 00000000000..c9e3862a31f --- /dev/null +++ b/packaging/language/pear.py @@ -0,0 +1,230 @@ +#!/usr/bin/python -tt +# -*- coding: utf-8 -*- + +# (c) 2012, Afterburn +# (c) 2013, Aaron Bull Schaefer +# (c) 2015, Jonathan Lestrelin +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: pear +short_description: Manage pear/pecl packages +description: + - Manage PHP packages with the pear package manager. +author: + - "'jonathan.lestrelin' " +notes: [] +requirements: [] +options: + name: + description: + - Name of the package to install, upgrade, or remove. + required: true + default: null + + state: + description: + - Desired state of the package. + required: false + default: "present" + choices: ["present", "absent", "latest"] +''' + +EXAMPLES = ''' +# Install pear package +- pear: name=Net_URL2 state=present + +# Install pecl package +- pear: name=pecl/json_post state=present + +# Upgrade package +- pear: name=Net_URL2 state=latest + +# Remove packages +- pear: name=Net_URL2,pecl/json_post state=absent +''' + +import os + +def get_local_version(pear_output): + """Take pear remoteinfo output and get the installed version""" + lines = pear_output.split('\n') + for line in lines: + if 'Installed ' in line: + installed = line.rsplit(None, 1)[-1].strip() + if installed == '-': continue + return installed + return None + +def get_repository_version(pear_output): + """Take pear remote-info output and get the latest version""" + lines = pear_output.split('\n') + for line in lines: + if 'Latest ' in line: + return line.rsplit(None, 1)[-1].strip() + return None + +def query_package(module, name, state="present"): + """Query the package status in both the local system and the repository. 
+ Returns a boolean to indicate if the package is installed, + and a second boolean to indicate if the package is up-to-date.""" + if state == "present": + lcmd = "pear info %s" % (name) + lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) + if lrc != 0: + # package is not installed locally + return False, False + + rcmd = "pear remote-info %s" % (name) + rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) + + # get the version installed locally (if any) + lversion = get_local_version(rstdout) + + # get the version in the repository + rversion = get_repository_version(rstdout) + + if rrc == 0: + # Return True to indicate that the package is installed locally, + # and the result of the version number comparison + # to determine if the package is up-to-date. + return True, (lversion == rversion) + + return False, False + + +def remove_packages(module, packages): + remove_c = 0 + # Using a for loop incase of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + installed, updated = query_package(module, package) + if not installed: + continue + + cmd = "pear uninstall %s" % (package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to remove %s" % (package)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, state, packages, package_files): + install_c = 0 + + for i, package in enumerate(packages): + # if the package is installed and state == present + # or state == latest and is up-to-date then skip + installed, updated = query_package(module, package) + if installed and (state == 'present' or (state == 'latest' and updated)): + continue + + if state == 'present': + command = 'install' + + if state == 'latest': + command = 'upgrade' + + cmd = "pear %s %s" % (command, package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to install %s" % (package)) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) + + module.exit_json(changed=False, msg="package(s) already installed") + + +def check_packages(module, packages, state): + would_be_changed = [] + for package in packages: + installed, updated = query_package(module, package) + if ((state in ["present", "latest"] and not installed) or + (state == "absent" and installed) or + (state == "latest" and not updated)): + would_be_changed.append(package) + if would_be_changed: + if state == "absent": + state = "removed" + module.exit_json(changed=True, msg="%s package(s) would be %s" % ( + len(would_be_changed), state)) + else: + module.exit_json(change=False, msg="package(s) already %s" % state) + +import os + +def exe_exists(program): + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + exe_file = os.path.join(path, program) + if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK): + return True + + return False + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(aliases=['pkg']), + state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed'])), + required_one_of = [['name']], + supports_check_mode = True) + + if not exe_exists("pear"): + module.fail_json(msg="cannot find pear executable in PATH") + 
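For reference, the two version parsers above key off the "Installed" and "Latest" rows that pear remote-info prints. A self-contained check of that parsing, with the functions condensed from the ones added in this patch (the sample text is made up to show the expected shape of the output, not captured from a real pear run):

def get_local_version(pear_output):
    # Pick the version out of the "Installed" row; "-" means not installed.
    for line in pear_output.split('\n'):
        if 'Installed ' in line:
            installed = line.rsplit(None, 1)[-1].strip()
            if installed == '-':
                continue
            return installed
    return None

def get_repository_version(pear_output):
    # Pick the version out of the "Latest" row.
    for line in pear_output.split('\n'):
        if 'Latest ' in line:
            return line.rsplit(None, 1)[-1].strip()
    return None

sample = 'Latest      1.3.4\nInstalled   1.3.1\n'
assert get_repository_version(sample) == '1.3.4'
assert get_local_version(sample) == '1.3.1'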
+ p = module.params + + # normalize the state parameter + if p['state'] in ['present', 'installed']: + p['state'] = 'present' + elif p['state'] in ['absent', 'removed']: + p['state'] = 'absent' + + if p['name']: + pkgs = p['name'].split(',') + + pkg_files = [] + for i, pkg in enumerate(pkgs): + pkg_files.append(None) + + if module.check_mode: + check_packages(module, pkgs, p['state']) + + if p['state'] in ['present', 'latest']: + install_packages(module, p['state'], pkgs, pkg_files) + elif p['state'] == 'absent': + remove_packages(module, pkgs) + +# import module snippets +from ansible.module_utils.basic import * + +main() From 537562217fbc7645a9771efb2f7bd051c948077a Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 6 Jun 2015 09:13:11 +0200 Subject: [PATCH 523/720] puppet: ensure puppet is in live mode per default puppet may be configured to operate in `--noop` mode per default. That is why we must pass a `--no-noop` to make sure, changes are going to be applied. --- system/puppet.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/system/puppet.py b/system/puppet.py index 46a5ea58d4f..3d4223bd1e5 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -156,10 +156,14 @@ def main(): cmd += " --show-diff" if module.check_mode: cmd += " --noop" + else: + cmd += " --no-noop" else: cmd = "%s apply --detailed-exitcodes " % base_cmd if module.check_mode: cmd += "--noop " + else: + cmd += "--no-noop " cmd += pipes.quote(p['manifest']) rc, stdout, stderr = module.run_command(cmd) From f33efc929a87fb3b206c106eeda70153e546b740 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 6 Jun 2015 09:42:56 +0200 Subject: [PATCH 524/720] puppet: add --environment support --- system/puppet.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/system/puppet.py b/system/puppet.py index 46a5ea58d4f..49ccfaf3cbd 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -59,6 +59,11 @@ options: - Basename of the facter output file required: false default: ansible + environment: + desciption: + - Puppet environment to be used. 
+ required: false + default: None requirements: [ puppet ] author: Monty Taylor ''' @@ -69,6 +74,9 @@ EXAMPLES = ''' # Run puppet and timeout in 5 minutes - puppet: timeout=5m + +# Run puppet using a different environment +- puppet: environment=testing ''' @@ -104,6 +112,7 @@ def main(): default=False, aliases=['show-diff'], type='bool'), facts=dict(default=None), facter_basename=dict(default='ansible'), + environment=dict(required=False, default=None), ), supports_check_mode=True, required_one_of=[ @@ -154,10 +163,14 @@ def main(): puppetmaster=pipes.quote(p['puppetmaster'])) if p['show_diff']: cmd += " --show-diff" + if p['environment']: + cmd += " --environment '%s'" % p['environment'] if module.check_mode: cmd += " --noop" else: cmd = "%s apply --detailed-exitcodes " % base_cmd + if p['environment']: + cmd += "--environment '%s' " % p['environment'] if module.check_mode: cmd += "--noop " cmd += pipes.quote(p['manifest']) From d63425388b4e58d37d435afadf40cbde9117d937 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 6 Jun 2015 09:46:16 +0200 Subject: [PATCH 525/720] puppet: fix missing space between command and arg Fixes: ~~~ { "cmd": "/usr/bin/puppetconfig print agent_disabled_lockfile", "failed": true, "msg": "[Errno 2] No such file or directory", "rc": 2 } ~~~ --- system/puppet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/puppet.py b/system/puppet.py index 46a5ea58d4f..a7796c1b7ca 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -128,7 +128,7 @@ def main(): # Check if puppet is disabled here if p['puppetmaster']: rc, stdout, stderr = module.run_command( - PUPPET_CMD + "config print agent_disabled_lockfile") + PUPPET_CMD + " config print agent_disabled_lockfile") if os.path.exists(stdout.strip()): module.fail_json( msg="Puppet agent is administratively disabled.", disabled=True) From a7c7e2d6d55a94e85192a95213c4cff28342c28c Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 6 Jun 2015 10:08:16 +0200 Subject: [PATCH 526/720] puppet: make arg puppetmaster optional puppetmaster was used to determine if `agent` or `apply` should be used. But puppetmaster is not required by puppet per default. Puppet may have a config or could find out by itself (...) where the puppet master is. It changed the code so we only use `apply` if a manifest was passed, otherwise we use `agent`. This also fixes the example, which did not work the way without this change. ~~~ # Run puppet agent and fail if anything goes wrong - puppet ~~~ --- system/puppet.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/system/puppet.py b/system/puppet.py index 46a5ea58d4f..e0a1cf79853 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -35,12 +35,12 @@ options: default: 30m puppetmaster: description: - - The hostname of the puppetmaster to contact. Must have this or manifest + - The hostname of the puppetmaster to contact. required: false default: None manifest: desciption: - - Path to the manifest file to run puppet apply on. Must have this or puppetmaster + - Path to the manifest file to run puppet apply on. 
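Taken together, the puppet changes in patches 523 to 526 mean: puppet agent is used unless a manifest is supplied (then puppet apply), --no-noop is passed whenever Ansible is not in check mode so a "noop = true" in puppet.conf cannot silently turn the run into a dry run, and --environment / --server are only appended when those parameters are set. A compressed sketch of the resulting command construction (build_puppet_cmd and the 30-minute base_cmd default are illustrative names only; the flags themselves are the ones from the diffs):

import pipes

def build_puppet_cmd(p, check_mode, base_cmd="timeout -s 9 30m puppet"):
    if not p.get('manifest'):
        # No manifest: one-time agent run, optionally against a given master.
        cmd = ("%s agent --onetime"
               " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay"
               " --detailed-exitcodes --verbose") % base_cmd
        if p.get('puppetmaster'):
            cmd += " --server %s" % pipes.quote(p['puppetmaster'])
        if p.get('show_diff'):
            cmd += " --show-diff"
        if p.get('environment'):
            cmd += " --environment '%s'" % p['environment']
        cmd += " --noop" if check_mode else " --no-noop"
    else:
        # Manifest supplied: run puppet apply on it instead.
        cmd = "%s apply --detailed-exitcodes " % base_cmd
        if p.get('environment'):
            cmd += "--environment '%s' " % p['environment']
        cmd += "--noop " if check_mode else "--no-noop "
        cmd += pipes.quote(p['manifest'])
    return cmd

print(build_puppet_cmd({'puppetmaster': 'puppet.example.com'}, check_mode=False))

Note that patch 526 as shown appends the master with " -- server", which contains a stray space; the agent option is --server, so the sketch uses that spelling.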
required: false default: None show_diff: @@ -64,7 +64,7 @@ author: Monty Taylor ''' EXAMPLES = ''' -# Run puppet and fail if anything goes wrong +# Run puppet agent and fail if anything goes wrong - puppet # Run puppet and timeout in 5 minutes @@ -106,7 +106,7 @@ def main(): facter_basename=dict(default='ansible'), ), supports_check_mode=True, - required_one_of=[ + mutually_exclusive=[ ('puppetmaster', 'manifest'), ], ) @@ -126,7 +126,7 @@ def main(): manifest=p['manifest'])) # Check if puppet is disabled here - if p['puppetmaster']: + if not p['manifest']: rc, stdout, stderr = module.run_command( PUPPET_CMD + "config print agent_disabled_lockfile") if os.path.exists(stdout.strip()): @@ -145,13 +145,14 @@ def main(): base_cmd = "timeout -s 9 %(timeout)s %(puppet_cmd)s" % dict( timeout=pipes.quote(p['timeout']), puppet_cmd=PUPPET_CMD) - if p['puppetmaster']: + if not p['manifest']: cmd = ("%(base_cmd)s agent --onetime" - " --server %(puppetmaster)s" " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay" " --detailed-exitcodes --verbose") % dict( base_cmd=base_cmd, - puppetmaster=pipes.quote(p['puppetmaster'])) + ) + if p['puppetmaster']: + cmd += " -- server %s" % pipes.quote(p['puppetmaster']) if p['show_diff']: cmd += " --show-diff" if module.check_mode: From 724501e9afc586f1a207d23fca3a72535ce4c738 Mon Sep 17 00:00:00 2001 From: Pepe Barbe Date: Sun, 7 Jun 2015 13:18:33 -0500 Subject: [PATCH 527/720] Refactor win_chocolatey module * Refactor code to be more robust. Run main logic inside a try {} catch {} block. If there is any error, bail out and log all the command output automatically. * Rely on error code generated by chocolatey instead of scraping text output to determine success/failure. * Add support for unattended installs: (`-y` flag is a requirement by chocolatey) * Before (un)installing, check existence of files. * Use functions to abstract logic * The great rewrite of 0.9.9, the `choco` interface has changed, check if chocolatey is installed and an older version. If so upgrade to latest. * Allow upgrading packages that are already installed * Use verbose logging for chocolate actions * Adding functionality to specify a source for a chocolatey repository. (@smadam813) * Removing pre-determined sources and adding specified source url in it's place. (@smadam813) Contains contributions from: * Adam Keech (@smadam813) --- windows/win_chocolatey.ps1 | 339 ++++++++++++++++++++++--------------- windows/win_chocolatey.py | 43 ++--- 2 files changed, 218 insertions(+), 164 deletions(-) diff --git a/windows/win_chocolatey.ps1 b/windows/win_chocolatey.ps1 index de42434da76..4a033d23157 100644 --- a/windows/win_chocolatey.ps1 +++ b/windows/win_chocolatey.ps1 @@ -16,25 +16,11 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+$ErrorActionPreference = "Stop" + # WANT_JSON # POWERSHELL_COMMON -function Write-Log -{ - param - ( - [parameter(mandatory=$false)] - [System.String] - $message - ) - - $date = get-date -format 'yyyy-MM-dd hh:mm:ss.zz' - - Write-Host "$date | $message" - - Out-File -InputObject "$date $message" -FilePath $global:LoggingFile -Append -} - $params = Parse-Args $args; $result = New-Object PSObject; Set-Attr $result "changed" $false; @@ -48,21 +34,22 @@ Else Fail-Json $result "missing required argument: name" } -if(($params.logPath).length -gt 0) +If ($params.force) { - $global:LoggingFile = $params.logPath + $force = $params.force | ConvertTo-Bool } -else +Else { - $global:LoggingFile = "c:\ansible-playbook.log" + $force = $false } -If ($params.force) + +If ($params.upgrade) { - $force = $params.force | ConvertTo-Bool + $upgrade = $params.upgrade | ConvertTo-Bool } Else { - $force = $false + $upgrade = $false } If ($params.version) @@ -74,6 +61,15 @@ Else $version = $null } +If ($params.source) +{ + $source = $params.source.ToString().ToLower() +} +Else +{ + $source = $null +} + If ($params.showlog) { $showlog = $params.showlog | ConvertTo-Bool @@ -96,157 +92,230 @@ Else $state = "present" } -$ChocoAlreadyInstalled = get-command choco -ErrorAction 0 -if ($ChocoAlreadyInstalled -eq $null) +Function Chocolatey-Install-Upgrade { - #We need to install chocolatey - $install_choco_result = iex ((new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1")) - $result.changed = $true - $executable = "C:\ProgramData\chocolatey\bin\choco.exe" -} -Else -{ - $executable = "choco.exe" -} + [CmdletBinding()] -If ($params.source) -{ - $source = $params.source.ToString().ToLower() - If (($source -ne "chocolatey") -and ($source -ne "webpi") -and ($source -ne "windowsfeatures") -and ($source -ne "ruby") -and (!$source.startsWith("http://", "CurrentCultureIgnoreCase")) -and (!$source.startsWith("https://", "CurrentCultureIgnoreCase"))) + param() + + $ChocoAlreadyInstalled = get-command choco -ErrorAction 0 + if ($ChocoAlreadyInstalled -eq $null) + { + #We need to install chocolatey + iex ((new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1")) + $result.changed = $true + $script:executable = "C:\ProgramData\chocolatey\bin\choco.exe" + } + else { - Fail-Json $result "source is $source - must be one of chocolatey, ruby, webpi, windowsfeatures or a custom source url." 
+ $script:executable = "choco.exe" + + if ((choco --version) -lt '0.9.9') + { + Choco-Upgrade chocolatey + } } } -Elseif (!$params.source) + + +Function Choco-IsInstalled { - $source = "chocolatey" + [CmdletBinding()] + + param( + [Parameter(Mandatory=$true, Position=1)] + [string]$package + ) + + $cmd = "$executable list --local-only $package" + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "choco_error_cmd" $cmd + Set-Attr $result "choco_error_log" "$results" + + Throw "Error checking installation status for $package" + } + + If ("$results" -match " $package .* (\d+) packages installed.") + { + return $matches[1] -gt 0 + } + + $false } -if ($source -eq "webpi") +Function Choco-Upgrade { - # check whether 'webpi' installation source is available; if it isn't, install it - $webpi_check_cmd = "$executable list webpicmd -localonly" - $webpi_check_result = invoke-expression $webpi_check_cmd - Set-Attr $result "chocolatey_bootstrap_webpi_check_cmd" $webpi_check_cmd - Set-Attr $result "chocolatey_bootstrap_webpi_check_log" $webpi_check_result - if ( - ( - ($webpi_check_result.GetType().Name -eq "String") -and - ($webpi_check_result -match "No packages found") - ) -or - ($webpi_check_result -contains "No packages found.") + [CmdletBinding()] + + param( + [Parameter(Mandatory=$true, Position=1)] + [string]$package, + [Parameter(Mandatory=$false, Position=2)] + [string]$version, + [Parameter(Mandatory=$false, Position=3)] + [string]$source, + [Parameter(Mandatory=$false, Position=4)] + [bool]$force ) + + if (-not (Choco-IsInstalled $package)) { - #lessmsi is a webpicmd dependency, but dependency resolution fails unless it's installed separately - $lessmsi_install_cmd = "$executable install lessmsi" - $lessmsi_install_result = invoke-expression $lessmsi_install_cmd - Set-Attr $result "chocolatey_bootstrap_lessmsi_install_cmd" $lessmsi_install_cmd - Set-Attr $result "chocolatey_bootstrap_lessmsi_install_log" $lessmsi_install_result + throw "$package is not installed, you cannot upgrade" + } - $webpi_install_cmd = "$executable install webpicmd" - $webpi_install_result = invoke-expression $webpi_install_cmd - Set-Attr $result "chocolatey_bootstrap_webpi_install_cmd" $webpi_install_cmd - Set-Attr $result "chocolatey_bootstrap_webpi_install_log" $webpi_install_result + $cmd = "$executable upgrade -dv -y $package" - if (($webpi_install_result | select-string "already installed").length -gt 0) - { - #no change - } - elseif (($webpi_install_result | select-string "webpicmd has finished successfully").length -gt 0) + if ($version) + { + $cmd += " -version $version" + } + + if ($source) + { + $cmd += " -source $source" + } + + if ($force) + { + $cmd += " -force" + } + + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "choco_error_cmd" $cmd + Set-Attr $result "choco_error_log" "$results" + Throw "Error installing $package" + } + + if ("$results" -match ' upgraded (\d+)/\d+ package\(s\)\. 
') + { + if ($matches[1] -gt 0) { $result.changed = $true } - Else - { - Fail-Json $result "WebPI install error: $webpi_install_result" - } } } -$expression = $executable -if ($state -eq "present") -{ - $expression += " install $package" -} -Elseif ($state -eq "absent") -{ - $expression += " uninstall $package" -} -if ($force) + +Function Choco-Install { - if ($state -eq "present") + [CmdletBinding()] + + param( + [Parameter(Mandatory=$true, Position=1)] + [string]$package, + [Parameter(Mandatory=$false, Position=2)] + [string]$version, + [Parameter(Mandatory=$false, Position=3)] + [string]$source, + [Parameter(Mandatory=$false, Position=4)] + [bool]$force, + [Parameter(Mandatory=$false, Position=5)] + [bool]$upgrade + ) + + if (Choco-IsInstalled $package) { - $expression += " -force" + if ($upgrade) + { + Choco-Upgrade -package $package -version $version -source $source -force $force + } + + return } -} -if ($version) -{ - $expression += " -version $version" -} -if ($source -eq "chocolatey") -{ - $expression += " -source https://chocolatey.org/api/v2/" -} -elseif (($source -eq "windowsfeatures") -or ($source -eq "webpi") -or ($source -eq "ruby")) -{ - $expression += " -source $source" -} -elseif(($source -ne $Null) -and ($source -ne "")) -{ - $expression += " -source $source" -} -Set-Attr $result "chocolatey command" $expression -$op_result = invoke-expression $expression -if ($state -eq "present") -{ - if ( - (($op_result | select-string "already installed").length -gt 0) -or - # webpi has different text output, and that doesn't include the package name but instead the human-friendly name - (($op_result | select-string "No products to be installed").length -gt 0) - ) + $cmd = "$executable install -dv -y $package" + + if ($version) { - #no change + $cmd += " -version $version" } - elseif ( - (($op_result | select-string "has finished successfully").length -gt 0) -or - # webpi has different text output, and that doesn't include the package name but instead the human-friendly name - (($op_result | select-string "Install of Products: SUCCESS").length -gt 0) -or - (($op_result | select-string "gem installed").length -gt 0) -or - (($op_result | select-string "gems installed").length -gt 0) - ) + + if ($source) { - $result.changed = $true + $cmd += " -source $source" + } + + if ($force) + { + $cmd += " -force" } - Else + + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) { - Fail-Json $result "Install error: $op_result" + Set-Attr $result "choco_error_cmd" $cmd + Set-Attr $result "choco_error_log" "$results" + Throw "Error installing $package" } + + $result.changed = $true } -Elseif ($state -eq "absent") + +Function Choco-Uninstall { - $op_result = invoke-expression "$executable uninstall $package" - # HACK: Misleading - 'Uninstalling from folder' appears in output even when package is not installed, hence order of checks this way - if ( - (($op_result | select-string "not installed").length -gt 0) -or - (($op_result | select-string "Cannot find path").length -gt 0) + [CmdletBinding()] + + param( + [Parameter(Mandatory=$true, Position=1)] + [string]$package, + [Parameter(Mandatory=$false, Position=2)] + [string]$version, + [Parameter(Mandatory=$false, Position=3)] + [bool]$force ) + + if (-not (Choco-IsInstalled $package)) { - #no change + return } - elseif (($op_result | select-string "Uninstalling from folder").length -gt 0) + + $cmd = "$executable uninstall -dv -y $package" + + if ($version) { - $result.changed = $true + $cmd += " -version $version" } - else + + if ($force) { - 
Fail-Json $result "Uninstall error: $op_result" + $cmd += " -force" } + + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "choco_error_cmd" $cmd + Set-Attr $result "choco_error_log" "$results" + Throw "Error uninstalling $package" + } + + $result.changed = $true } +Try +{ + Chocolatey-Install-Upgrade + + if ($state -eq "present") + { + Choco-Install -package $package -version $version -source $source ` + -force $force -upgrade $upgrade + } + else + { + Choco-Uninstall -package $package -version $version -force $force + } -if ($showlog) + Exit-Json $result; +} +Catch { - Set-Attr $result "chocolatey_log" $op_result + Fail-Json $result $_.Exception.Message } -Set-Attr $result "chocolatey_success" "true" -Exit-Json $result; diff --git a/windows/win_chocolatey.py b/windows/win_chocolatey.py index 63ec1ecd214..fe00f2e0f6a 100644 --- a/windows/win_chocolatey.py +++ b/windows/win_chocolatey.py @@ -53,42 +53,29 @@ options: - no default: no aliases: [] - version: + upgrade: description: - - Specific version of the package to be installed - - Ignored when state == 'absent' - required: false - default: null - aliases: [] - showlog: - description: - - Outputs the chocolatey log inside a chocolatey_log property. + - If package is already installed it, try to upgrade to the latest version or to the specified version required: false choices: - yes - no default: no aliases: [] - source: + version: description: - - Which source to install from - require: false - choices: - - chocolatey - - ruby - - webpi - - windowsfeatures - default: chocolatey + - Specific version of the package to be installed + - Ignored when state == 'absent' + required: false + default: null aliases: [] - logPath: + source: description: - - Where to log command output to + - Specify source rather than using default chocolatey repository require: false - default: c:\\ansible-playbook.log + default: null aliases: [] -author: - - '"Trond Hindenes (@trondhindenes)" ' - - '"Peter Mounce (@petemounce)" ' +author: Trond Hindenes, Peter Mounce, Pepe Barbe, Adam Keech ''' # TODO: @@ -111,10 +98,8 @@ EXAMPLES = ''' name: git state: absent - # Install Application Request Routing v3 from webpi - # Logically, this requires that you install IIS first (see win_feature) - # To find a list of packages available via webpi source, `choco list -source webpi` + # Install git from specified repository win_chocolatey: - name: ARRv3 - source: webpi + name: git + source: https://someserver/api/v2/ ''' From 53bb87d110d2e4b8dd429f66ab93e2d2bf646335 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 7 Jun 2015 17:45:33 -0400 Subject: [PATCH 528/720] added missing options: --- cloud/cloudstack/cs_project.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py index b604a1b6f32..e604abc13db 100644 --- a/cloud/cloudstack/cs_project.py +++ b/cloud/cloudstack/cs_project.py @@ -26,6 +26,7 @@ description: - Create, update, suspend, activate and remove projects. version_added: '2.0' author: '"René Moser (@resmo)" ' +options: name: description: - Name of the project. 
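Patch 528 above is a one-line change, but the reason it matters is easy to miss: the DOCUMENTATION block is parsed as YAML, so per-option entries are only recognised when they are nested under an options: key. The following is a minimal sketch of that layout (hypothetical module name, PyYAML assumed to be available), not a copy of the real cs_project documentation:

import yaml

DOCUMENTATION = '''
---
module: cs_example
short_description: illustrates the expected documentation layout
description:
  - Per-option entries must be nested under the options key.
options:
  name:
    description:
      - Name of the project.
    required: true
'''

doc = yaml.safe_load(DOCUMENTATION)
# Without the options: key, 'name' would end up at the top level of the
# parsed document and anything reading doc['options'] would never see it.
print(sorted(doc['options']))   # ['name']
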
From bcee7c13cfd867c880914d8547e3ddee844acf46 Mon Sep 17 00:00:00 2001 From: "jonathan.lestrelin" Date: Mon, 8 Jun 2015 09:28:01 +0200 Subject: [PATCH 529/720] Fix unused import and variable and correct documentation --- packaging/language/pear.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/packaging/language/pear.py b/packaging/language/pear.py index c9e3862a31f..5762f9c815c 100644 --- a/packaging/language/pear.py +++ b/packaging/language/pear.py @@ -26,16 +26,14 @@ module: pear short_description: Manage pear/pecl packages description: - Manage PHP packages with the pear package manager. +version_added: 2.0 author: - "'jonathan.lestrelin' " -notes: [] -requirements: [] options: name: description: - Name of the package to install, upgrade, or remove. required: true - default: null state: description: @@ -132,7 +130,7 @@ def remove_packages(module, packages): module.exit_json(changed=False, msg="package(s) already absent") -def install_packages(module, state, packages, package_files): +def install_packages(module, state, packages): install_c = 0 for i, package in enumerate(packages): @@ -178,7 +176,6 @@ def check_packages(module, packages, state): else: module.exit_json(change=False, msg="package(s) already %s" % state) -import os def exe_exists(program): for path in os.environ["PATH"].split(os.pathsep): @@ -220,7 +217,7 @@ def main(): check_packages(module, pkgs, p['state']) if p['state'] in ['present', 'latest']: - install_packages(module, p['state'], pkgs, pkg_files) + install_packages(module, p['state'], pkgs) elif p['state'] == 'absent': remove_packages(module, pkgs) From f09389b1792a720cc9eede346eebeb1a6a88510f Mon Sep 17 00:00:00 2001 From: Jhonny Everson Date: Mon, 8 Jun 2015 17:46:53 -0300 Subject: [PATCH 530/720] Adds handler for error responses --- monitoring/datadog_monitor.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py index 24de8af10ba..97968ed648d 100644 --- a/monitoring/datadog_monitor.py +++ b/monitoring/datadog_monitor.py @@ -187,7 +187,10 @@ def _post_monitor(module, options): msg = api.Monitor.create(type=module.params['type'], query=module.params['query'], name=module.params['name'], message=module.params['message'], options=options) - module.exit_json(changed=True, msg=msg) + if 'errors' in msg: + module.fail_json(msg=str(msg['errors'])) + else: + module.exit_json(changed=True, msg=msg) except Exception, e: module.fail_json(msg=str(e)) @@ -197,7 +200,9 @@ def _update_monitor(module, monitor, options): msg = api.Monitor.update(id=monitor['id'], query=module.params['query'], name=module.params['name'], message=module.params['message'], options=options) - if len(set(msg) - set(monitor)) == 0: + if 'errors' in msg: + module.fail_json(msg=str(msg['errors'])) + elif len(set(msg) - set(monitor)) == 0: module.exit_json(changed=False, msg=msg) else: module.exit_json(changed=True, msg=msg) @@ -243,7 +248,7 @@ def mute_monitor(module): module.fail_json(msg="Monitor %s not found!" % module.params['name']) elif monitor['options']['silenced']: module.fail_json(msg="Monitor is already muted. 
Datadog does not allow to modify muted alerts, consider unmuting it first.") - elif (module.params['silenced'] is not None + elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0): module.exit_json(changed=False) try: From 443be858f1b2705617787ab4035ba00e3f840e7d Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 9 Jun 2015 13:06:24 +0200 Subject: [PATCH 531/720] cloudstack: fix project name must not be case sensitiv --- cloud/cloudstack/cs_project.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py index e604abc13db..13209853527 100644 --- a/cloud/cloudstack/cs_project.py +++ b/cloud/cloudstack/cs_project.py @@ -167,7 +167,7 @@ class AnsibleCloudStackProject(AnsibleCloudStack): projects = self.cs.listProjects(**args) if projects: for p in projects['project']: - if project in [ p['name'], p['id']]: + if project.lower() in [ p['name'].lower(), p['id']]: self.project = p break return self.project From 1b8eb9091b53610e8cf71562509c610e5f0ef23e Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 9 Jun 2015 13:08:38 +0200 Subject: [PATCH 532/720] cloudstack: remove listall in cs_project listall in cs_project can return the wrong project for root admins, because project name are not unique in separate accounts. --- cloud/cloudstack/cs_project.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py index 13209853527..b505433892e 100644 --- a/cloud/cloudstack/cs_project.py +++ b/cloud/cloudstack/cs_project.py @@ -160,7 +160,6 @@ class AnsibleCloudStackProject(AnsibleCloudStack): project = self.module.params.get('name') args = {} - args['listall'] = True args['account'] = self.get_account(key='name') args['domainid'] = self.get_domain(key='id') From d517abf44b515746f44c757e0949977e68e6f723 Mon Sep 17 00:00:00 2001 From: Jhonny Everson Date: Tue, 9 Jun 2015 09:44:34 -0300 Subject: [PATCH 533/720] Fixes the bug where it was using only the keys to determine whether a change was made, i.e. values changes for existing keys was reported incorrectly. 
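The fix is easier to follow with a short, self-contained sketch of the helper this commit adds (shown verbatim in the diff below). The monitor payloads here are hypothetical and trimmed to a single field; the real module ignores 'creator' and 'overall_state':

def _equal_dicts(a, b, ignore_keys):
    # Compare the keys both sides share, minus the ignored ones, then their values.
    ka = set(a).difference(ignore_keys)
    kb = set(b).difference(ignore_keys)
    return ka == kb and all(a[k] == b[k] for k in ka)

current = {'query': 'avg(last_5m):cpu > 90', 'creator': 'alice'}
desired = {'query': 'avg(last_5m):cpu > 75', 'creator': 'bob'}

# The previous check compared key sets only, so a changed value went unnoticed:
print(len(set(desired) - set(current)) == 0)          # True  -> reported "no change"
print(_equal_dicts(desired, current, ['creator']))    # False -> change detected
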
--- monitoring/datadog_monitor.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py index 97968ed648d..cb54cd32b5d 100644 --- a/monitoring/datadog_monitor.py +++ b/monitoring/datadog_monitor.py @@ -194,6 +194,10 @@ def _post_monitor(module, options): except Exception, e: module.fail_json(msg=str(e)) +def _equal_dicts(a, b, ignore_keys): + ka = set(a).difference(ignore_keys) + kb = set(b).difference(ignore_keys) + return ka == kb and all(a[k] == b[k] for k in ka) def _update_monitor(module, monitor, options): try: @@ -202,7 +206,7 @@ def _update_monitor(module, monitor, options): options=options) if 'errors' in msg: module.fail_json(msg=str(msg['errors'])) - elif len(set(msg) - set(monitor)) == 0: + elif _equal_dicts(msg, monitor, ['creator', 'overall_state']): module.exit_json(changed=False, msg=msg) else: module.exit_json(changed=True, msg=msg) From bca0d2d32b105b34d050754a1ba69353805ff60d Mon Sep 17 00:00:00 2001 From: David Siefert Date: Tue, 9 Jun 2015 10:21:33 -0500 Subject: [PATCH 534/720] Adding support for setting the topic of a channel --- notification/irc.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/notification/irc.py b/notification/irc.py index 8b87c41f1ba..e6852c8510a 100644 --- a/notification/irc.py +++ b/notification/irc.py @@ -47,6 +47,12 @@ options: - The message body. required: true default: null + topic: + description: + - Set the channel topic + required: false + default: null + version_added: 2.0 color: description: - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). @@ -106,7 +112,7 @@ import ssl from time import sleep -def send_msg(channel, msg, server='localhost', port='6667', key=None, +def send_msg(channel, msg, server='localhost', port='6667', key=None, topic=None, nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False): '''send message to IRC''' @@ -163,6 +169,10 @@ def send_msg(channel, msg, server='localhost', port='6667', key=None, raise Exception('Timeout waiting for IRC JOIN response') sleep(0.5) + if topic is not None: + irc.send('TOPIC %s :%s\r\n' % (channel, topic)) + sleep(1) + irc.send('PRIVMSG %s :%s\r\n' % (channel, message)) sleep(1) irc.send('PART %s\r\n' % channel) @@ -186,6 +196,7 @@ def main(): "blue", "black", "none"]), channel=dict(required=True), key=dict(), + topic=dict(), passwd=dict(), timeout=dict(type='int', default=30), use_ssl=dict(type='bool', default=False) @@ -196,6 +207,7 @@ def main(): server = module.params["server"] port = module.params["port"] nick = module.params["nick"] + topic = module.params["topic"] msg = module.params["msg"] color = module.params["color"] channel = module.params["channel"] @@ -205,7 +217,7 @@ def main(): use_ssl = module.params["use_ssl"] try: - send_msg(channel, msg, server, port, key, nick, color, passwd, timeout, use_ssl) + send_msg(channel, msg, server, port, key, topic, nick, color, passwd, timeout, use_ssl) except Exception, e: module.fail_json(msg="unable to send to IRC: %s" % e) From ef7381f24636a350dd7bd0d061634fd2203d1b61 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Tue, 9 Jun 2015 12:58:45 -0400 Subject: [PATCH 535/720] Adding author's github id --- monitoring/datadog_monitor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py index cb54cd32b5d..f1acb169ce0 100644 --- 
a/monitoring/datadog_monitor.py +++ b/monitoring/datadog_monitor.py @@ -34,7 +34,7 @@ description: - "Manages monitors within Datadog" - "Options like described on http://docs.datadoghq.com/api/" version_added: "2.0" -author: '"Sebastian Kornehl" ' +author: '"Sebastian Kornehl (@skornehl)" ' notes: [] requirements: [datadog] options: From 2643c3eddad1d313ead9131405f66c927ba999d2 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 10 Jun 2015 13:00:02 +0200 Subject: [PATCH 536/720] puppet: update author to new format --- system/puppet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/puppet.py b/system/puppet.py index 83bbcbe6e18..336b2c81108 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -65,7 +65,7 @@ options: required: false default: None requirements: [ puppet ] -author: Monty Taylor +author: "Monty Taylor (@emonty)" ''' EXAMPLES = ''' From 2f967a949f9a45657c31ae66c0c7e7c2672a87d8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 10 Jun 2015 12:58:44 -0400 Subject: [PATCH 537/720] minor docfix --- monitoring/nagios.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 543f094b70e..0026751ea58 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -77,7 +77,7 @@ options: version_added: "2.0" description: - the Servicegroup we want to set downtimes/alerts for. - B(Required) option when using the C(servicegroup_service_downtime) amd C(servicegroup_host_downtime). + B(Required) option when using the C(servicegroup_service_downtime) amd C(servicegroup_host_downtime). command: description: - The raw command to send to nagios, which From bec97ff60e95029efe17e3781ac8de64ce10478e Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 10 Jun 2015 23:31:48 +0200 Subject: [PATCH 538/720] cloudstack: add new module cs_network --- cloud/cloudstack/cs_network.py | 637 +++++++++++++++++++++++++++++++++ 1 file changed, 637 insertions(+) create mode 100644 cloud/cloudstack/cs_network.py diff --git a/cloud/cloudstack/cs_network.py b/cloud/cloudstack/cs_network.py new file mode 100644 index 00000000000..c8b3b32539d --- /dev/null +++ b/cloud/cloudstack/cs_network.py @@ -0,0 +1,637 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_network +short_description: Manages networks on Apache CloudStack based clouds. +description: + - Create, update, restart and delete networks. +version_added: '2.0' +author: '"René Moser (@resmo)" ' +options: + name: + description: + - Name (case sensitive) of the network. + required: true + displaytext: + description: + - Displaytext of the network. + - If not specified, C(name) will be used as displaytext. + required: false + default: null + network_offering: + description: + - Name of the offering for the network. + - Required if C(state=present). 
+ required: false + default: null + start_ip: + description: + - The beginning IPv4 address of the network belongs to. + - Only considered on create. + required: false + default: null + end_ip: + description: + - The ending IPv4 address of the network belongs to. + - If not specified, value of C(start_ip) is used. + - Only considered on create. + required: false + default: null + gateway: + description: + - The gateway of the network. + - Required for shared networks and isolated networks when it belongs to VPC. + - Only considered on create. + required: false + default: null + netmask: + description: + - The netmask of the network. + - Required for shared networks and isolated networks when it belongs to VPC. + - Only considered on create. + required: false + default: null + start_ipv6: + description: + - The beginning IPv6 address of the network belongs to. + - Only considered on create. + required: false + default: null + end_ipv6: + description: + - The ending IPv6 address of the network belongs to. + - If not specified, value of C(start_ipv6) is used. + - Only considered on create. + required: false + default: null + cidr_ipv6: + description: + - CIDR of IPv6 network, must be at least /64. + - Only considered on create. + required: false + default: null + gateway_ipv6: + description: + - The gateway of the IPv6 network. + - Required for shared networks. + - Only considered on create. + required: false + default: null + vlan: + description: + - The ID or VID of the network. + required: false + default: null + vpc: + description: + - The ID or VID of the network. + required: false + default: null + isolated_pvlan: + description: + - The isolated private vlan for this network. + required: false + default: null + clean_up: + description: + - Cleanup old network elements. + - Only considered on C(state=restarted). + required: false + default: null + acl_type: + description: + - Access control type. + - Only considered on create. + required: false + default: account + choices: [ 'account', 'domain' ] + network_domain: + description: + - The network domain. + required: false + default: null + state: + description: + - State of the network. + required: false + default: present + choices: [ 'present', 'absent', 'restarted' ] + zone: + description: + - Name of the zone in which the network should be deployed. + - If not set, default zone is used. + required: false + default: null + project: + description: + - Name of the project the network to be deployed in. + required: false + default: null + domain: + description: + - Domain the network is related to. + required: false + default: null + account: + description: + - Account the network is related to. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# create a network +- local_action: + module: cs_network + name: my network + zone: gva-01 + network_offering: DefaultIsolatedNetworkOfferingWithSourceNatService + network_domain: example.com + +# update a network +- local_action: + module: cs_network + name: my network + displaytext: network of domain example.local + network_domain: example.local + +# restart a network with clean up +- local_action: + module: cs_network + name: my network + clean_up: yes + state: restared + +# remove a network +- local_action: + module: cs_network + name: my network + state: absent +''' + +RETURN = ''' +--- +id: + description: ID of the network. 
+ returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the network. + returned: success + type: string + sample: web project +displaytext: + description: Display text of the network. + returned: success + type: string + sample: web project +dns1: + description: IP address of the 1st nameserver. + returned: success + type: string + sample: 1.2.3.4 +dns2: + description: IP address of the 2nd nameserver. + returned: success + type: string + sample: 1.2.3.4 +cidr: + description: IPv4 network CIDR. + returned: success + type: string + sample: 10.101.64.0/24 +gateway: + description: IPv4 gateway. + returned: success + type: string + sample: 10.101.64.1 +netmask: + description: IPv4 netmask. + returned: success + type: string + sample: 255.255.255.0 +cidr_ipv6: + description: IPv6 network CIDR. + returned: success + type: string + sample: 2001:db8::/64 +gateway_ipv6: + description: IPv6 gateway. + returned: success + type: string + sample: 2001:db8::1 +state: + description: State of the network. + returned: success + type: string + sample: Implemented +zone: + description: Name of zone. + returned: success + type: string + sample: ch-gva-2 +domain: + description: Domain the network is related to. + returned: success + type: string + sample: ROOT +account: + description: Account the network is related to. + returned: success + type: string + sample: example account +project: + description: Name of project. + returned: success + type: string + sample: Production +tags: + description: List of resource tags associated with the network. + returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +acl_type: + description: Access type of the network (Domain, Account). + returned: success + type: string + sample: Account +broadcast_domaintype: + description: Broadcast domain type of the network. + returned: success + type: string + sample: Vlan +type: + description: Type of the network. + returned: success + type: string + sample: Isolated +traffic_type: + description: Traffic type of the network. + returned: success + type: string + sample: Guest +state: + description: State of the network (Allocated, Implemented, Setup). + returned: success + type: string + sample: Allocated +is_persistent: + description: Whether the network is persistent or not. + returned: success + type: boolean + sample: false +network_domain: + description: The network domain + returned: success + type: string + sample: example.local +network_offering: + description: The network offering name. 
+ returned: success + type: string + sample: DefaultIsolatedNetworkOfferingWithSourceNatService +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackNetwork(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.network = None + + + def get_or_fallback(self, key=None, fallback_key=None): + value = self.module.params.get(key) + if not value: + value = self.module.params.get(fallback_key) + return value + + + def get_vpc(self, key=None): + vpc = self.module.params.get('vpc') + if not vpc: + return None + + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['zoneid'] = self.get_zone(key='id') + + vpcs = self.cs.listVPCs(**args) + if vpcs: + for v in vpcs['vpc']: + if vpc in [ v['name'], v['displaytext'], v['id'] ]: + return self._get_by_key(key, v) + self.module.fail_json(msg="VPC '%s' not found" % vpc) + + + def get_network_offering(self, key=None): + network_offering = self.module.params.get('network_offering') + if not network_offering: + self.module.fail_json(msg="missing required arguments: network_offering") + + args = {} + args['zoneid'] = self.get_zone(key='id') + + network_offerings = self.cs.listNetworkOfferings(**args) + if network_offerings: + for no in network_offerings['networkoffering']: + if network_offering in [ no['name'], no['displaytext'], no['id'] ]: + return self._get_by_key(key, no) + self.module.fail_json(msg="Network offering '%s' not found" % network_offering) + + + def _get_args(self): + args = {} + args['name'] = self.module.params.get('name') + args['displaytext'] = self.get_or_fallback('displaytext','name') + args['networkdomain'] = self.module.params.get('network_domain') + args['networkofferingid'] = self.get_network_offering(key='id') + return args + + + def get_network(self): + if not self.network: + network = self.module.params.get('name') + + args = {} + args['zoneid'] = self.get_zone(key='id') + args['projectid'] = self.get_project(key='id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + + networks = self.cs.listNetworks(**args) + if networks: + for n in networks['network']: + if network in [ n['name'], n['displaytext'], n['id']]: + self.network = n + break + return self.network + + + def present_network(self): + network = self.get_network() + if not network: + network = self.create_network(network) + else: + network = self.update_network(network) + return network + + + def update_network(self, network): + args = self._get_args() + args['id'] = network['id'] + + if self._has_changed(args, network): + self.result['changed'] = True + if not self.module.check_mode: + network = self.cs.updateNetwork(**args) + + if 'errortext' in network: + self.module.fail_json(msg="Failed: '%s'" % network['errortext']) + + poll_async = self.module.params.get('poll_async') + if network and poll_async: + network = self._poll_job(network, 'network') + return network + + + def create_network(self, network): + self.result['changed'] = True + + args = self._get_args() + args['acltype'] = self.module.params.get('acl_type') + args['zoneid'] = self.get_zone(key='id') + args['projectid'] = self.get_project(key='id') + args['account'] = self.get_account(key='name') + args['domainid'] = 
self.get_domain(key='id') + args['startip'] = self.module.params.get('start_ip') + args['endip'] = self.get_or_fallback('end_ip', 'start_ip') + args['netmask'] = self.module.params.get('netmask') + args['gateway'] = self.module.params.get('gateway') + args['startipv6'] = self.module.params.get('start_ipv6') + args['endipv6'] = self.get_or_fallback('end_ipv6', 'start_ipv6') + args['ip6cidr'] = self.module.params.get('cidr_ipv6') + args['ip6gateway'] = self.module.params.get('gateway_ipv6') + args['vlan'] = self.module.params.get('vlan') + args['isolatedpvlan'] = self.module.params.get('isolated_pvlan') + args['subdomainaccess'] = self.module.params.get('subdomain_access') + args['vpcid'] = self.get_vpc(key='id') + + if not self.module.check_mode: + res = self.cs.createNetwork(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + network = res['network'] + return network + + + def restart_network(self): + network = self.get_network() + + if not network: + self.module.fail_json(msg="No network named '%s' found." % self.module.params('name')) + + # Restarting only available for these states + if network['state'].lower() in [ 'implemented', 'setup' ]: + self.result['changed'] = True + + args = {} + args['id'] = network['id'] + args['cleanup'] = self.module.params.get('clean_up') + + if not self.module.check_mode: + network = self.cs.restartNetwork(**args) + + if 'errortext' in network: + self.module.fail_json(msg="Failed: '%s'" % network['errortext']) + + poll_async = self.module.params.get('poll_async') + if network and poll_async: + network = self._poll_job(network, 'network') + return network + + + def absent_network(self): + network = self.get_network() + if network: + self.result['changed'] = True + + args = {} + args['id'] = network['id'] + + if not self.module.check_mode: + res = self.cs.deleteNetwork(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'network') + return network + + + def get_result(self, network): + if network: + if 'id' in network: + self.result['id'] = network['id'] + if 'name' in network: + self.result['name'] = network['name'] + if 'displaytext' in network: + self.result['displaytext'] = network['displaytext'] + if 'dns1' in network: + self.result['dns1'] = network['dns1'] + if 'dns2' in network: + self.result['dns2'] = network['dns2'] + if 'cidr' in network: + self.result['cidr'] = network['cidr'] + if 'broadcastdomaintype' in network: + self.result['broadcast_domaintype'] = network['broadcastdomaintype'] + if 'netmask' in network: + self.result['netmask'] = network['netmask'] + if 'gateway' in network: + self.result['gateway'] = network['gateway'] + if 'ip6cidr' in network: + self.result['cidr_ipv6'] = network['ip6cidr'] + if 'ip6gateway' in network: + self.result['gateway_ipv6'] = network['ip6gateway'] + if 'state' in network: + self.result['state'] = network['state'] + if 'type' in network: + self.result['type'] = network['type'] + if 'traffictype' in network: + self.result['traffic_type'] = network['traffictype'] + if 'zone' in network: + self.result['zone'] = network['zonename'] + if 'domain' in network: + self.result['domain'] = network['domain'] + if 'account' in network: + self.result['account'] = network['account'] + if 'project' in network: + self.result['project'] = network['project'] + if 'acltype' in network: + self.result['acl_type'] = 
network['acltype'] + if 'networkdomain' in network: + self.result['network_domain'] = network['networkdomain'] + if 'networkofferingname' in network: + self.result['network_offering'] = network['networkofferingname'] + if 'ispersistent' in network: + self.result['is_persistent'] = network['ispersistent'] + if 'tags' in network: + self.result['tags'] = [] + for tag in network['tags']: + result_tag = {} + result_tag['key'] = tag['key'] + result_tag['value'] = tag['value'] + self.result['tags'].append(result_tag) + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + displaytext = dict(default=None), + network_offering = dict(default=None), + zone = dict(default=None), + start_ip = dict(default=None), + end_ip = dict(default=None), + gateway = dict(default=None), + netmask = dict(default=None), + start_ipv6 = dict(default=None), + end_ipv6 = dict(default=None), + cidr_ipv6 = dict(default=None), + gateway_ipv6 = dict(default=None), + vlan = dict(default=None), + vpc = dict(default=None), + isolated_pvlan = dict(default=None), + clean_up = dict(default=None), + network_domain = dict(default=None), + state = dict(choices=['present', 'absent', 'restarted' ], default='present'), + acl_type = dict(choices=['account', 'domain'], default='account'), + project = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + poll_async = dict(type='bool', choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ['start_ip', 'netmask', 'gateway'], + ['start_ipv6', 'cidr_ipv6', 'gateway_ipv6'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_network = AnsibleCloudStackNetwork(module) + + state = module.params.get('state') + if state in ['absent']: + network = acs_network.absent_network() + + elif state in ['restarted']: + network = acs_network.restart_network() + + else: + network = acs_network.present_network() + + result = acs_network.get_result(network) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 4f38c4387b7dc079af2fa3f684d68eb7bab2b541 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 11 Jun 2015 11:36:34 -0500 Subject: [PATCH 539/720] Add new module 'expect' --- commands/__init__.py | 0 commands/expect.py | 189 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 189 insertions(+) create mode 100644 commands/__init__.py create mode 100644 commands/expect.py diff --git a/commands/__init__.py b/commands/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/commands/expect.py b/commands/expect.py new file mode 100644 index 00000000000..0922ba4e464 --- /dev/null +++ b/commands/expect.py @@ -0,0 +1,189 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Matt Martz +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either 
version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import datetime + +try: + import pexpect + HAS_PEXPECT = True +except ImportError: + HAS_PEXPECT = False + + +DOCUMENTATION = ''' +--- +module: expect +version_added: 2.0 +short_description: Executes a command and responds to prompts +description: + - The M(expect) module executes a command and responds to prompts + - The given command will be executed on all selected nodes. It will not be + processed through the shell, so variables like C($HOME) and operations + like C("<"), C(">"), C("|"), and C("&") will not work +options: + command: + description: + - the command module takes command to run. + required: true + creates: + description: + - a filename, when it already exists, this step will B(not) be run. + required: false + removes: + description: + - a filename, when it does not exist, this step will B(not) be run. + required: false + chdir: + description: + - cd into this directory before running the command + required: false + executable: + description: + - change the shell used to execute the command. Should be an absolute + path to the executable. + required: false + responses: + description: + - Mapping of expected string and string to respond with + required: true + timeout: + description: + - Amount of time in seconds to wait for the expected strings + default: 30 + echo: + description: + - Whether or not to echo out your response strings + default: false +requirements: + - python >= 2.6 + - pexpect >= 3.3 +notes: + - If you want to run a command through the shell (say you are using C(<), + C(>), C(|), etc), you must specify a shell in the command such as + C(/bin/bash -c "/path/to/something | grep else") +author: '"Matt Martz (@sivel)" ' +''' + +EXAMPLES = ''' +- expect: + command: passwd username + responses: + (?i)password: "MySekretPa$$word" +''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + command=dict(required=True), + chdir=dict(), + executable=dict(), + creates=dict(), + removes=dict(), + responses=dict(type='dict', required=True), + timeout=dict(type='int', default=30), + echo=dict(type='bool', default=False), + ) + ) + + if not HAS_PEXPECT: + module.fail_json(msg='The pexpect python module is required') + + chdir = module.params['chdir'] + executable = module.params['executable'] + args = module.params['command'] + creates = module.params['creates'] + removes = module.params['removes'] + responses = module.params['responses'] + timeout = module.params['timeout'] + echo = module.params['echo'] + + events = dict() + for key, value in responses.iteritems(): + events[key.decode()] = u'%s\n' % value.rstrip('\n').decode() + + if args.strip() == '': + module.fail_json(rc=256, msg="no command given") + + if chdir: + chdir = os.path.abspath(os.path.expanduser(chdir)) + os.chdir(chdir) + + if creates: + # do not run the command if the line contains creates=filename + # and the filename already exists. This allows idempotence + # of command executions. 
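# A runnable aside (not part of expect.py): the responses/events handling
# above maps each expected pattern to a newline-terminated reply, which is
# exactly what pexpect.runu consumes further down. A minimal standalone
# sketch, assuming pexpect >= 3.3 is installed and using a hypothetical
# interactive command in place of something like `passwd username`:

import pexpect

answers = {'(?i)proceed': 'yes'}
events = dict((k, u'%s\n' % v.rstrip('\n')) for k, v in answers.items())

out, status = pexpect.runu(
    "bash -c 'echo -n Proceed; read reply; echo got:$reply'",
    timeout=30, withexitstatus=True, events=events, echo=False)

print(status)   # 0 when the command exits cleanly; `out` holds the pty transcript
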
+ v = os.path.expanduser(creates) + if os.path.exists(v): + module.exit_json( + cmd=args, + stdout="skipped, since %s exists" % v, + changed=False, + stderr=False, + rc=0 + ) + + if removes: + # do not run the command if the line contains removes=filename + # and the filename does not exist. This allows idempotence + # of command executions. + v = os.path.expanduser(removes) + if not os.path.exists(v): + module.exit_json( + cmd=args, + stdout="skipped, since %s does not exist" % v, + changed=False, + stderr=False, + rc=0 + ) + + startd = datetime.datetime.now() + + if executable: + cmd = '%s %s' % (executable, args) + else: + cmd = args + + try: + out, rc = pexpect.runu(cmd, timeout=timeout, withexitstatus=True, + events=events, cwd=chdir, echo=echo) + except pexpect.ExceptionPexpect, e: + module.fail_json(msg='%s' % e) + + endd = datetime.datetime.now() + delta = endd - startd + + if out is None: + out = '' + + module.exit_json( + cmd=args, + stdout=out.rstrip('\r\n'), + rc=rc, + start=str(startd), + end=str(endd), + delta=str(delta), + changed=True, + ) + +# import module snippets +from ansible.module_utils.basic import * + +main() From 76e382abaa3f5906dc79a4d9bfeb66c39892ebc8 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 11 Jun 2015 12:36:47 -0500 Subject: [PATCH 540/720] Remove the executable option as it's redundant --- commands/expect.py | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/commands/expect.py b/commands/expect.py index 0922ba4e464..124c718b73b 100644 --- a/commands/expect.py +++ b/commands/expect.py @@ -54,11 +54,6 @@ options: description: - cd into this directory before running the command required: false - executable: - description: - - change the shell used to execute the command. Should be an absolute - path to the executable. 
- required: false responses: description: - Mapping of expected string and string to respond with @@ -94,7 +89,6 @@ def main(): argument_spec=dict( command=dict(required=True), chdir=dict(), - executable=dict(), creates=dict(), removes=dict(), responses=dict(type='dict', required=True), @@ -107,7 +101,6 @@ def main(): module.fail_json(msg='The pexpect python module is required') chdir = module.params['chdir'] - executable = module.params['executable'] args = module.params['command'] creates = module.params['creates'] removes = module.params['removes'] @@ -156,13 +149,8 @@ def main(): startd = datetime.datetime.now() - if executable: - cmd = '%s %s' % (executable, args) - else: - cmd = args - try: - out, rc = pexpect.runu(cmd, timeout=timeout, withexitstatus=True, + out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True, events=events, cwd=chdir, echo=echo) except pexpect.ExceptionPexpect, e: module.fail_json(msg='%s' % e) From 4fc275d1c59e91864f1f84af950e79bd28759fd2 Mon Sep 17 00:00:00 2001 From: Alex Lo Date: Fri, 12 Jun 2015 00:49:37 -0400 Subject: [PATCH 541/720] remove extraneous imports --- cloud/amazon/cloudtrail.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index 6a1885d6ee7..d6ed254df91 100644 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -90,11 +90,6 @@ EXAMPLES = """ local_action: cloudtrail state=absent name=main region=us-east-1 """ -import time -import sys -import os -from collections import Counter - boto_import_failed = False try: import boto From d0ef6db43cb5788bdac4a296537f2e3ce11d3ef6 Mon Sep 17 00:00:00 2001 From: Alex Lo Date: Fri, 12 Jun 2015 00:49:59 -0400 Subject: [PATCH 542/720] There is no absent, only disabled --- cloud/amazon/cloudtrail.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index d6ed254df91..eb445768ed5 100644 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -87,7 +87,7 @@ EXAMPLES = """ s3_key_prefix='' region=us-east-1 - name: remove cloudtrail - local_action: cloudtrail state=absent name=main region=us-east-1 + local_action: cloudtrail state=disabled name=main region=us-east-1 """ boto_import_failed = False From d1f50493bd062cdd9320916a1c1a891ac8553186 Mon Sep 17 00:00:00 2001 From: Alex Lo Date: Fri, 12 Jun 2015 00:50:27 -0400 Subject: [PATCH 543/720] Fix boto library checking --- cloud/amazon/cloudtrail.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index eb445768ed5..5a87f35e918 100644 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -90,13 +90,14 @@ EXAMPLES = """ local_action: cloudtrail state=disabled name=main region=us-east-1 """ -boto_import_failed = False +HAS_BOTO = False try: import boto import boto.cloudtrail from boto.regioninfo import RegionInfo + HAS_BOTO = True except ImportError: - boto_import_failed = True + HAS_BOTO = False class CloudTrailManager: """Handles cloudtrail configuration""" @@ -147,9 +148,6 @@ class CloudTrailManager: def main(): - if not has_libcloud: - module.fail_json(msg='boto is required.') - argument_spec = ec2_argument_spec() argument_spec.update(dict( state={'required': True, 'choices': ['enabled', 'disabled'] }, @@ -161,6 +159,10 @@ def main(): required_together = ( ['state', 's3_bucket_name'] ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, 
required_together=required_together) + + if not HAS_BOTO: + module.fail_json(msg='Alex sucks boto is required.') + ec2_url, access_key, secret_key, region = get_ec2_creds(module) aws_connect_params = dict(aws_access_key_id=access_key, aws_secret_access_key=secret_key) From 416d96a1e67847609a5642690545f6db17a637c4 Mon Sep 17 00:00:00 2001 From: Alex Lo Date: Fri, 12 Jun 2015 01:31:45 -0400 Subject: [PATCH 544/720] Error message typo --- cloud/amazon/cloudtrail.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index 5a87f35e918..962473e6a9e 100644 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -161,7 +161,7 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together) if not HAS_BOTO: - module.fail_json(msg='Alex sucks boto is required.') + module.fail_json(msg='boto is required.') ec2_url, access_key, secret_key, region = get_ec2_creds(module) aws_connect_params = dict(aws_access_key_id=access_key, From 3f76a37f27dac02bc0423565904bb6cad2957760 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 12 Jun 2015 14:11:38 -0400 Subject: [PATCH 545/720] fixed doc issues --- network/nmcli.py | 71 ++++++++++++++++++++++++------------------------ 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/network/nmcli.py b/network/nmcli.py index 18f0ecbab1f..45043fd2807 100644 --- a/network/nmcli.py +++ b/network/nmcli.py @@ -25,6 +25,7 @@ module: nmcli author: Chris Long short_description: Manage Networking requirements: [ nmcli, dbus ] +version_added: "2.0" description: - Manage the network devices. Create, modify, and manage, ethernet, teams, bonds, vlans etc. options: @@ -39,11 +40,11 @@ options: choices: [ "yes", "no" ] description: - Whether the connection should start on boot. - - Whether the connection profile can be automatically activated ( default: yes) + - Whether the connection profile can be automatically activated conn_name: required: True description: - - Where conn_name will be the name used to call the connection. when not provided a default name is generated: [-][-] + - 'Where conn_name will be the name used to call the connection. when not provided a default name is generated: [-][-]' ifname: required: False default: conn_name @@ -60,9 +61,9 @@ options: mode: required: False choices: [ "balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb" ] - default: None + default: balence-rr description: - - This is the type of device or network connection that you wish to create for a bond, team or bridge. (NetworkManager default: balance-rr) + - This is the type of device or network connection that you wish to create for a bond, team or bridge. master: required: False default: None @@ -72,35 +73,35 @@ options: required: False default: None description: - - The IPv4 address to this interface using this format ie: "192.168.1.24/24" + - 'The IPv4 address to this interface using this format ie: "192.168.1.24/24"' gw4: required: False description: - - The IPv4 gateway for this interface using this format ie: "192.168.100.1" + - 'The IPv4 gateway for this interface using this format ie: "192.168.100.1"' dns4: required: False default: None description: - - A list of upto 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ['"8.8.8.8 8.8.4.4"'] + - 'A list of upto 3 dns servers, ipv4 format e.g. 
To add two IPv4 DNS server addresses: ["8.8.8.8 8.8.4.4"]' ip6: required: False default: None description: - - The IPv6 address to this interface using this format ie: "abbe::cafe" + - 'The IPv6 address to this interface using this format ie: "abbe::cafe"' gw6: required: False default: None description: - - The IPv6 gateway for this interface using this format ie: "2001:db8::1" + - 'The IPv6 gateway for this interface using this format ie: "2001:db8::1"' dns6: required: False description: - - A list of upto 3 dns servers, ipv6 format e.g. To add two IPv6 DNS server addresses: ['"2001:4860:4860::8888 2001:4860:4860::8844"'] + - 'A list of upto 3 dns servers, ipv6 format e.g. To add two IPv6 DNS server addresses: ["2001:4860:4860::8888 2001:4860:4860::8844"]' mtu: required: False - default: None + default: 1500 description: - - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created. (NetworkManager default: 1500) + - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created. - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband) primary: required: False @@ -109,24 +110,24 @@ options: - This is only used with bond and is the primary interface name (for "active-backup" mode), this is the usually the 'ifname' miimon: required: False - default: None + default: 100 description: - - This is only used with bond - miimon (NetworkManager default: 100) + - This is only used with bond - miimon downdelay: required: False default: None description: - - This is only used with bond - downdelay (NetworkManager default: 0) + - This is only used with bond - downdelay updelay: required: False default: None description: - - This is only used with bond - updelay (NetworkManager default: 0) + - This is only used with bond - updelay arp_interval: required: False default: None description: - - This is only used with bond - ARP interval (NetworkManager default: 0) + - This is only used with bond - ARP interval arp_ip_target: required: False default: None @@ -139,49 +140,49 @@ options: - This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge priority: required: False - default: None + default: 128 description: - - This is only used with 'bridge' - sets STP priority (NetworkManager default: 128) + - This is only used with 'bridge' - sets STP priority forwarddelay: required: False - default: None + default: 15 description: - - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds (NetworkManager default: 15) + - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds hellotime: required: False - default: None + default: 2 description: - - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds (NetworkManager default: 2) + - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds maxage: required: False - default: None + default: 20 description: - - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds (NetworkManager default: 20) + - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds ageingtime: required: False - default: None + default: 300 description: - - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds (NetworkManager default: 300) + - This is 
only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds mac: required: False default: None description: - - This is only used with bridge - MAC address of the bridge (note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel) + - 'This is only used with bridge - MAC address of the bridge (note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel)' slavepriority: required: False - default: None + default: 32 description: - - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave (default: 32) + - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave path_cost: required: False - default: None + default: 100 description: - - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave (NetworkManager default: 100) + - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave hairpin: required: False - default: None + default: yes description: - - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame was received on. (NetworkManager default: yes) + - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame was received on. vlanid: required: False default: None @@ -1066,4 +1067,4 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() \ No newline at end of file +main() From d3b3d7ff3c249de062df93a533a568f0681cce3c Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Sat, 13 Jun 2015 13:56:26 -0500 Subject: [PATCH 546/720] Fix the lxc container restart state The lxc container restart state does not ensure that the container is in fact started unless another config or command is passed into the task. to fix this the module simply needs to have the function call added ``self._container_startup()`` after the container is put into a stopped state. Signed-off By: Kevin Carter --- cloud/lxc/lxc_container.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 18555e2e351..7fc86825c52 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -1065,6 +1065,9 @@ class LxcContainerManagement(object): self.container.stop() self.state_change = True + # Run container startup + self._container_startup() + # Check if the container needs to have an archive created. self._check_archive() From 9f0ee40b42f491421e582066c8b82ea95d0cf769 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Thu, 13 Nov 2014 18:57:00 -0500 Subject: [PATCH 547/720] Add ec2_vpc_igw module. --- cloud/amazon/ec2_vpc_igw.py | 189 ++++++++++++++++++++++++++++++++++++ 1 file changed, 189 insertions(+) create mode 100644 cloud/amazon/ec2_vpc_igw.py diff --git a/cloud/amazon/ec2_vpc_igw.py b/cloud/amazon/ec2_vpc_igw.py new file mode 100644 index 00000000000..1c5bf9dea1c --- /dev/null +++ b/cloud/amazon/ec2_vpc_igw.py @@ -0,0 +1,189 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_vpc_igw +short_description: configure AWS virtual private clouds +description: + - Create or terminates AWS internat gateway in a virtual private cloud. ''' +'''This module has a dependency on python-boto. +version_added: "1.8" +options: + vpc_id: + description: + - "The VPC ID for which to create or remove the Internet Gateway." + required: true + state: + description: + - Create or terminate the IGW + required: true + default: present + aliases: [] + region: + description: + - region in which the resource exists. + required: false + default: null + aliases: ['aws_region', 'ec2_region'] + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY''' +''' environment variable is used. + required: false + default: None + aliases: ['ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY''' +''' environment variable is used. + required: false + default: None + aliases: ['ec2_access_key', 'access_key' ] + validate_certs: + description: + - When set to "no", SSL certificates will not be validated for boto''' +''' versions >= 2.6.0. + required: false + default: "yes" + choices: ["yes", "no"] + aliases: [] + version_added: "1.5" + +requirements: [ "boto" ] +author: Robert Estelle +''' + +EXAMPLES = ''' +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. + +# Ensure that the VPC has an Internet Gateway. +# The Internet Gateway ID is can be accessed via {{igw.gateway_id}} for use +# in setting up NATs etc. 
+ local_action: + module: ec2_vpc_igw + vpc_id: {{vpc.vpc_id}} + region: {{vpc.vpc.region}} + state: present + register: igw +''' + + +import sys + +try: + import boto.ec2 + import boto.vpc + from boto.exception import EC2ResponseError +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + + +class IGWExcepton(Exception): + pass + + +def ensure_igw_absent(vpc_conn, vpc_id, check_mode): + igws = vpc_conn.get_all_internet_gateways( + filters={'attachment.vpc-id': vpc_id}) + + if not igws: + return {'changed': False} + + if check_mode: + return {'changed': True} + + for igw in igws: + try: + vpc_conn.detach_internet_gateway(igw.id, vpc_id) + vpc_conn.delete_internet_gateway(igw.id) + except EC2ResponseError as e: + raise IGWExcepton('Unable to delete Internet Gateway, error: {0}' + .format(e)) + + return {'changed': True} + + +def ensure_igw_present(vpc_conn, vpc_id, check_mode): + igws = vpc_conn.get_all_internet_gateways( + filters={'attachment.vpc-id': vpc_id}) + + if len(igws) > 1: + raise IGWExcepton( + 'EC2 returned more than one Internet Gateway for VPC {0}, aborting' + .format(vpc_id)) + + if igws: + return {'changed': False, 'gateway_id': igws[0].id} + else: + if check_mode: + return {'changed': True, 'gateway_id': None} + + try: + igw = vpc_conn.create_internet_gateway() + vpc_conn.attach_internet_gateway(igw.id, vpc_id) + return {'changed': True, 'gateway_id': igw.id} + except EC2ResponseError as e: + raise IGWExcepton('Unable to create Internet Gateway, error: {0}' + .format(e)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update({ + 'vpc_id': {'required': True}, + 'state': {'choices': ['present', 'absent'], 'default': 'present'}, + }) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + if not region: + module.fail_json(msg='Region must be specified') + + try: + vpc_conn = boto.vpc.connect_to_region( + region, + aws_access_key_id=aws_access_key, + aws_secret_access_key=aws_secret_key + ) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg=str(e)) + + vpc_id = module.params.get('vpc_id') + state = module.params.get('state', 'present') + + try: + if state == 'present': + result = ensure_igw_present(vpc_conn, vpc_id, + check_mode=module.check_mode) + elif state == 'absent': + result = ensure_igw_absent(vpc_conn, vpc_id, + check_mode=module.check_mode) + except IGWExcepton as e: + module.fail_json(msg=str(e)) + + module.exit_json(**result) + +from ansible.module_utils.basic import * # noqa +from ansible.module_utils.ec2 import * # noqa + +if __name__ == '__main__': + main() From 829759fba7f392e5998e5508faa2c30b85249ea2 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Mon, 1 Dec 2014 16:01:46 -0500 Subject: [PATCH 548/720] ec2_vpc_igw - Exit with fail_json when boto is unavailable. 
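Rather than printing a raw "failed=True" line and calling sys.exit() at import time, the import is now wrapped so the module can report the missing dependency through fail_json(), and the ImportError is re-raised only when the file is imported as a library. A minimal standalone sketch of the same guard pattern (the empty argument_spec here is trimmed down purely for illustration):

    try:
        import boto.ec2  # noqa
        HAS_BOTO = True
    except ImportError:
        HAS_BOTO = False

    def main():
        module = AnsibleModule(argument_spec=dict())
        if not HAS_BOTO:
            # Report the missing dependency as a normal module failure
            module.fail_json(msg='boto is required for this module')

    # import module snippets
    from ansible.module_utils.basic import *  # noqa

    if __name__ == '__main__':
        main()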
--- cloud/amazon/ec2_vpc_igw.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2_vpc_igw.py b/cloud/amazon/ec2_vpc_igw.py index 1c5bf9dea1c..7276157bd56 100644 --- a/cloud/amazon/ec2_vpc_igw.py +++ b/cloud/amazon/ec2_vpc_igw.py @@ -83,15 +83,17 @@ EXAMPLES = ''' ''' -import sys +import sys # noqa try: import boto.ec2 import boto.vpc from boto.exception import EC2ResponseError + HAS_BOTO = True except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) + HAS_BOTO = False + if __name__ != '__main__': + raise class IGWExcepton(Exception): @@ -153,6 +155,8 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, ) + if not HAS_BOTO: + module.fail_json(msg='boto is required for this module') ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) if not region: From 6b32b95252c582a1687d98e435f5e33726c8a59d Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Mon, 1 Dec 2014 16:02:09 -0500 Subject: [PATCH 549/720] ec2_vpc_igw - Rename IGWException to AnsibleIGWException. --- cloud/amazon/ec2_vpc_igw.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/cloud/amazon/ec2_vpc_igw.py b/cloud/amazon/ec2_vpc_igw.py index 7276157bd56..cbac94528d2 100644 --- a/cloud/amazon/ec2_vpc_igw.py +++ b/cloud/amazon/ec2_vpc_igw.py @@ -96,7 +96,7 @@ except ImportError: raise -class IGWExcepton(Exception): +class AnsibleIGWException(Exception): pass @@ -115,8 +115,8 @@ def ensure_igw_absent(vpc_conn, vpc_id, check_mode): vpc_conn.detach_internet_gateway(igw.id, vpc_id) vpc_conn.delete_internet_gateway(igw.id) except EC2ResponseError as e: - raise IGWExcepton('Unable to delete Internet Gateway, error: {0}' - .format(e)) + raise AnsibleIGWException( + 'Unable to delete Internet Gateway, error: {0}'.format(e)) return {'changed': True} @@ -126,7 +126,7 @@ def ensure_igw_present(vpc_conn, vpc_id, check_mode): filters={'attachment.vpc-id': vpc_id}) if len(igws) > 1: - raise IGWExcepton( + raise AnsibleIGWException( 'EC2 returned more than one Internet Gateway for VPC {0}, aborting' .format(vpc_id)) @@ -141,8 +141,8 @@ def ensure_igw_present(vpc_conn, vpc_id, check_mode): vpc_conn.attach_internet_gateway(igw.id, vpc_id) return {'changed': True, 'gateway_id': igw.id} except EC2ResponseError as e: - raise IGWExcepton('Unable to create Internet Gateway, error: {0}' - .format(e)) + raise AnsibleIGWException( + 'Unable to create Internet Gateway, error: {0}'.format(e)) def main(): @@ -181,7 +181,7 @@ def main(): elif state == 'absent': result = ensure_igw_absent(vpc_conn, vpc_id, check_mode=module.check_mode) - except IGWExcepton as e: + except AnsibleIGWException as e: module.fail_json(msg=str(e)) module.exit_json(**result) From c21eebdd7b40f76a5e9d6d60102773e597f096a6 Mon Sep 17 00:00:00 2001 From: Rob White Date: Sun, 14 Jun 2015 16:31:31 +1000 Subject: [PATCH 550/720] Updated documentation and added boto profile support. 
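Besides the documentation rewrite, the connection handling below moves from get_ec2_creds() to the shared get_aws_connection_info()/connect_to_aws() helpers, which pick up boto profiles and the other common AWS connection arguments without per-module credential code. A minimal sketch of that pattern; the helper name connect() is illustrative, and boto.vpc is used here because the internet-gateway calls live on the VPC connection:

    import boto.vpc
    import boto.exception

    from ansible.module_utils.basic import *  # noqa
    from ansible.module_utils.ec2 import *  # noqa  (get_aws_connection_info, connect_to_aws)

    def connect(module):
        # Region, endpoint and credentials (including boto profiles) are resolved
        # by the shared ec2 helper code instead of per-module parameters.
        region, ec2_url, aws_connect_params = get_aws_connection_info(module)
        if not region:
            module.fail_json(msg='region must be specified')
        try:
            return connect_to_aws(boto.vpc, region, **aws_connect_params)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))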
--- cloud/amazon/ec2_vpc_igw.py | 94 ++++++++++++------------------------- 1 file changed, 30 insertions(+), 64 deletions(-) diff --git a/cloud/amazon/ec2_vpc_igw.py b/cloud/amazon/ec2_vpc_igw.py index cbac94528d2..63be48248ef 100644 --- a/cloud/amazon/ec2_vpc_igw.py +++ b/cloud/amazon/ec2_vpc_igw.py @@ -1,75 +1,42 @@ #!/usr/bin/python -# This file is part of Ansible # -# Ansible is free software: you can redistribute it and/or modify +# This is a free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # -# Ansible is distributed in the hope that it will be useful, +# This Ansible library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# along with this library. If not, see . DOCUMENTATION = ''' --- module: ec2_vpc_igw -short_description: configure AWS virtual private clouds +short_description: Manage an AWS VPC Internet gateway description: - - Create or terminates AWS internat gateway in a virtual private cloud. ''' -'''This module has a dependency on python-boto. -version_added: "1.8" + - Manage an AWS VPC Internet gateway +version_added: "2.0" +author: Robert Estelle, @erydo options: vpc_id: description: - - "The VPC ID for which to create or remove the Internet Gateway." + - The VPC ID for the VPC in which to manage the Internet Gateway. required: true + default: null state: description: - Create or terminate the IGW - required: true - default: present - aliases: [] - region: - description: - - region in which the resource exists. - required: false - default: null - aliases: ['aws_region', 'ec2_region'] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY''' -''' environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY''' -''' environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key' ] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto''' -''' versions >= 2.6.0. required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - -requirements: [ "boto" ] -author: Robert Estelle + default: present +extends_documentation_fragment: aws ''' EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. +# Note: These examples do not set authentication details, see the AWS Guide for details. # Ensure that the VPC has an Internet Gateway. 
# The Internet Gateway ID is can be accessed via {{igw.gateway_id}} for use @@ -147,40 +114,39 @@ def ensure_igw_present(vpc_conn, vpc_id, check_mode): def main(): argument_spec = ec2_argument_spec() - argument_spec.update({ - 'vpc_id': {'required': True}, - 'state': {'choices': ['present', 'absent'], 'default': 'present'}, - }) + argument_spec.update( + dict( + vpc_id = dict(required=True), + state = dict(choices=['present', 'absent'], default='present') + ) + ) + module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) + if not HAS_BOTO: module.fail_json(msg='boto is required for this module') - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - if not region: - module.fail_json(msg='Region must be specified') + region, ec2_url, aws_connect_params = get_aws_connection_info(module) - try: - vpc_conn = boto.vpc.connect_to_region( - region, - aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key - ) - except boto.exception.NoAuthHandlerFound as e: - module.fail_json(msg=str(e)) + if region: + try: + connection = connect_to_aws(boto.ec2, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") vpc_id = module.params.get('vpc_id') state = module.params.get('state', 'present') try: if state == 'present': - result = ensure_igw_present(vpc_conn, vpc_id, - check_mode=module.check_mode) + result = ensure_igw_present(connection, vpc_id, check_mode=module.check_mode) elif state == 'absent': - result = ensure_igw_absent(vpc_conn, vpc_id, - check_mode=module.check_mode) + result = ensure_igw_absent(connection, vpc_id, check_mode=module.check_mode) except AnsibleIGWException as e: module.fail_json(msg=str(e)) From 5f5577e110aec5f44f6544fc3ecbbeaf2230a025 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 12 Apr 2015 23:09:45 +0200 Subject: [PATCH 551/720] cloudstack: add new module cs_template --- cloud/cloudstack/cs_template.py | 633 ++++++++++++++++++++++++++++++++ 1 file changed, 633 insertions(+) create mode 100644 cloud/cloudstack/cs_template.py diff --git a/cloud/cloudstack/cs_template.py b/cloud/cloudstack/cs_template.py new file mode 100644 index 00000000000..48f00fad553 --- /dev/null +++ b/cloud/cloudstack/cs_template.py @@ -0,0 +1,633 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_template +short_description: Manages templates on Apache CloudStack based clouds. +description: + - Register a template from URL, create a template from a ROOT volume of a stopped VM or its snapshot and delete templates. +version_added: '2.0' +author: '"René Moser (@resmo)" ' +options: + name: + description: + - Name of the template. + required: true + url: + description: + - URL of where the template is hosted. 
+ - Mutually exclusive with C(vm). + required: false + default: null + vm: + description: + - VM name the template will be created from its volume or alternatively from a snapshot. + - VM must be in stopped state if created from its volume. + - Mutually exclusive with C(url). + required: false + default: null + snapshot: + description: + - Name of the snapshot, created from the VM ROOT volume, the template will be created from. + - C(vm) is required together with this argument. + required: false + default: null + os_type: + description: + - OS type that best represents the OS of this template. + required: false + default: null + checksum: + description: + - The MD5 checksum value of this template. + - If set, we search by checksum instead of name. + required: false + default: false + is_ready: + description: + - This flag is used for searching existing templates. + - If set to C(true), it will only list template ready for deployment e.g. successfully downloaded and installed. + - Recommended to set it to C(false). + required: false + default: false + is_public: + description: + - Register the template to be publicly available to all users. + - Only used if C(state) is present. + required: false + default: false + is_featured: + description: + - Register the template to be featured. + - Only used if C(state) is present. + required: false + default: false + is_dynamically_scalable: + description: + - Register the template having XS/VMWare tools installed in order to support dynamic scaling of VM CPU/memory. + - Only used if C(state) is present. + required: false + default: false + project: + description: + - Name of the project the template to be registered in. + required: false + default: null + zone: + description: + - Name of the zone you wish the template to be registered or deleted from. + - If not specified, first found zone will be used. + required: false + default: null + template_filter: + description: + - Name of the filter used to search for the template. + required: false + default: 'self' + choices: [ 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community' ] + hypervisor: + description: + - Name the hypervisor to be used for creating the new template. + - Relevant when using C(state=present). + required: false + default: none + choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] + requires_hvm: + description: + - true if this template requires HVM. + required: false + default: false + password_enabled: + description: + - True if the template supports the password reset feature. + required: false + default: false + template_tag: + description: + - the tag for this template. + required: false + default: null + sshkey_enabled: + description: + - True if the template supports the sshkey upload feature. + required: false + default: false + is_routing: + description: + - True if the template type is routing i.e., if template is used to deploy router. + - Only considered if C(url) is used. + required: false + default: false + format: + description: + - The format for the template. + - Relevant when using C(state=present). + required: false + default: null + choices: [ 'QCOW2', 'RAW', 'VHD', 'OVA' ] + is_extractable: + description: + - True if the template or its derivatives are extractable. + required: false + default: false + details: + description: + - Template details in key/value pairs. + required: false + default: null + bits: + description: + - 32 or 64 bits support. 
+ required: false + default: '64' + displaytext: + description: + - the display text of the template. + required: true + default: null + state: + description: + - State of the template. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Register a systemvm template +- local_action: + module: cs_template + name: systemvm-4.5 + url: "http://packages.shapeblue.com/systemvmtemplate/4.5/systemvm64template-4.5-vmware.ova" + hypervisor: VMware + format: OVA + zone: tokio-ix + os_type: Debian GNU/Linux 7(64-bit) + is_routing: yes + +# Create a template from a stopped virtual machine's volume +- local_action: + module: cs_template + name: debian-base-template + vm: debian-base-vm + os_type: Debian GNU/Linux 7(64-bit) + zone: tokio-ix + password_enabled: yes + is_public: yes + +# Create a template from a virtual machine's root volume snapshot +- local_action: + module: cs_template + name: debian-base-template + vm: debian-base-vm + snapshot: ROOT-233_2015061509114 + os_type: Debian GNU/Linux 7(64-bit) + zone: tokio-ix + password_enabled: yes + is_public: yes + +# Remove a template +- local_action: + module: cs_template + name: systemvm-4.2 + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the template. + returned: success + type: string + sample: Debian 7 64-bit +displaytext: + description: Displaytext of the template. + returned: success + type: string + sample: Debian 7.7 64-bit minimal 2015-03-19 +checksum: + description: MD5 checksum of the template. + returned: success + type: string + sample: 0b31bccccb048d20b551f70830bb7ad0 +status: + description: Status of the template. + returned: success + type: string + sample: Download Complete +is_ready: + description: True if the template is ready to be deployed from. + returned: success + type: boolean + sample: true +is_public: + description: True if the template is public. + returned: success + type: boolean + sample: true +is_featured: + description: True if the template is featured. + returned: success + type: boolean + sample: true +is_extractable: + description: True if the template is extractable. + returned: success + type: boolean + sample: true +format: + description: Format of the template. + returned: success + type: string + sample: OVA +os_type: + description: Typo of the OS. + returned: success + type: string + sample: CentOS 6.5 (64-bit) +password_enabled: + description: True if the reset password feature is enabled, false otherwise. + returned: success + type: boolean + sample: false +sshkey_enabled: + description: true if template is sshkey enabled, false otherwise. + returned: success + type: boolean + sample: false +cross_zones: + description: true if the template is managed across all zones, false otherwise. + returned: success + type: boolean + sample: false +template_type: + description: Type of the template. + returned: success + type: string + sample: USER +created: + description: Date of registering. + returned: success + type: string + sample: 2015-03-29T14:57:06+0200 +template_tag: + description: Template tag related to this template. + returned: success + type: string + sample: special +hypervisor: + description: Hypervisor related to this template. + returned: success + type: string + sample: VMware +tags: + description: List of resource tags associated with the template. 
+ returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +zone: + description: Name of zone the template is registered in. + returned: success + type: string + sample: zuerich +domain: + description: Domain the template is related to. + returned: success + type: string + sample: example domain +account: + description: Account the template is related to. + returned: success + type: string + sample: example account +project: + description: Name of project the template is related to. + returned: success + type: string + sample: Production +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackTemplate(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + + + def _get_args(self): + args = {} + args['name'] = self.module.params.get('name') + args['displaytext'] = self.module.params.get('displaytext') + args['bits'] = self.module.params.get('bits') + args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable') + args['isextractable'] = self.module.params.get('is_extractable') + args['isfeatured'] = self.module.params.get('is_featured') + args['ispublic'] = self.module.params.get('is_public') + args['passwordenabled'] = self.module.params.get('password_enabled') + args['requireshvm'] = self.module.params.get('requires_hvm') + args['templatetag'] = self.module.params.get('template_tag') + args['ostypeid'] = self.get_os_type(key='id') + + if not args['ostypeid']: + self.module.fail_json(msg="Missing required arguments: os_type") + + if not args['displaytext']: + args['displaytext'] = self.module.params.get('name') + return args + + + def get_root_volume(self, key=None): + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['virtualmachineid'] = self.get_vm(key='id') + args['type'] = "ROOT" + + volumes = self.cs.listVolumes(**args) + if volumes: + return self._get_by_key(key, volumes['volume'][0]) + self.module.fail_json(msg="Root volume for '%s' not found" % self.get_vm('name')) + + + def get_snapshot(self, key=None): + snapshot = self.module.params.get('snapshot') + if not snapshot: + return None + + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['volumeid'] = self.get_root_volume('id') + snapshots = self.cs.listSnapshots(**args) + if snapshots: + for s in snapshots['snapshot']: + if snapshot in [ s['name'], s['id'] ]: + return self._get_by_key(key, s) + self.module.fail_json(msg="Snapshot '%s' not found" % snapshot) + + + def create_template(self): + template = self.get_template() + if not template: + self.result['changed'] = True + + args = self._get_args() + snapshot_id = self.get_snapshot(key='id') + if snapshot_id: + args['snapshotid'] = snapshot_id + else: + args['volumeid'] = self.get_root_volume('id') + + if not self.module.check_mode: + template = self.cs.createTemplate(**args) + + if 'errortext' in template: + self.module.fail_json(msg="Failed: '%s'" % template['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + template = self._poll_job(template, 'template') + return template + + + def register_template(self): + template = self.get_template() + if not template: 
+ self.result['changed'] = True + args = self._get_args() + args['url'] = self.module.params.get('url') + args['format'] = self.module.params.get('format') + args['checksum'] = self.module.params.get('checksum') + args['isextractable'] = self.module.params.get('is_extractable') + args['isrouting'] = self.module.params.get('is_routing') + args['sshkeyenabled'] = self.module.params.get('sshkey_enabled') + args['hypervisor'] = self.get_hypervisor() + args['zoneid'] = self.get_zone(key='id') + args['domainid'] = self.get_domain(key='id') + args['account'] = self.get_account(key='name') + args['projectid'] = self.get_project(key='id') + + if not self.module.check_mode: + res = self.cs.registerTemplate(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + template = res['template'] + return template + + + def get_template(self): + args = {} + args['isready'] = self.module.params.get('is_ready') + args['templatefilter'] = self.module.params.get('template_filter') + args['zoneid'] = self.get_zone(key='id') + args['domainid'] = self.get_domain(key='id') + args['account'] = self.get_account(key='name') + args['projectid'] = self.get_project(key='id') + + # if checksum is set, we only look on that. + checksum = self.module.params.get('checksum') + if not checksum: + args['name'] = self.module.params.get('name') + + templates = self.cs.listTemplates(**args) + if templates: + # if checksum is set, we only look on that. + if not checksum: + return templates['template'][0] + else: + for i in templates['template']: + if i['checksum'] == checksum: + return i + return None + + + def remove_template(self): + template = self.get_template() + if template: + self.result['changed'] = True + + args = {} + args['id'] = template['id'] + args['zoneid'] = self.get_zone(key='id') + + if not self.module.check_mode: + res = self.cs.deleteTemplate(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self._poll_job(res, 'template') + return template + + + def get_result(self, template): + if template: + if 'displaytext' in template: + self.result['displaytext'] = template['displaytext'] + if 'name' in template: + self.result['name'] = template['name'] + if 'hypervisor' in template: + self.result['hypervisor'] = template['hypervisor'] + if 'zonename' in template: + self.result['zone'] = template['zonename'] + if 'checksum' in template: + self.result['checksum'] = template['checksum'] + if 'format' in template: + self.result['format'] = template['format'] + if 'isready' in template: + self.result['is_ready'] = template['isready'] + if 'ispublic' in template: + self.result['is_public'] = template['ispublic'] + if 'isfeatured' in template: + self.result['is_featured'] = template['isfeatured'] + if 'isextractable' in template: + self.result['is_extractable'] = template['isextractable'] + # and yes! it is really camelCase! 
+ if 'crossZones' in template: + self.result['cross_zones'] = template['crossZones'] + if 'ostypename' in template: + self.result['os_type'] = template['ostypename'] + if 'templatetype' in template: + self.result['template_type'] = template['templatetype'] + if 'passwordenabled' in template: + self.result['password_enabled'] = template['passwordenabled'] + if 'sshkeyenabled' in template: + self.result['sshkey_enabled'] = template['sshkeyenabled'] + if 'status' in template: + self.result['status'] = template['status'] + if 'created' in template: + self.result['created'] = template['created'] + if 'templatetag' in template: + self.result['template_tag'] = template['templatetag'] + if 'tags' in template: + self.result['tags'] = [] + for tag in template['tags']: + result_tag = {} + result_tag['key'] = tag['key'] + result_tag['value'] = tag['value'] + self.result['tags'].append(result_tag) + if 'domain' in template: + self.result['domain'] = template['domain'] + if 'account' in template: + self.result['account'] = template['account'] + if 'project' in template: + self.result['project'] = template['project'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + displaytext = dict(default=None), + url = dict(default=None), + vm = dict(default=None), + snapshot = dict(default=None), + os_type = dict(default=None), + is_ready = dict(type='bool', choices=BOOLEANS, default=False), + is_public = dict(type='bool', choices=BOOLEANS, default=True), + is_featured = dict(type='bool', choices=BOOLEANS, default=False), + is_dynamically_scalable = dict(type='bool', choices=BOOLEANS, default=False), + is_extractable = dict(type='bool', choices=BOOLEANS, default=False), + is_routing = dict(type='bool', choices=BOOLEANS, default=False), + checksum = dict(default=None), + template_filter = dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']), + hypervisor = dict(choices=['KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM'], default=None), + requires_hvm = dict(type='bool', choices=BOOLEANS, default=False), + password_enabled = dict(type='bool', choices=BOOLEANS, default=False), + template_tag = dict(default=None), + sshkey_enabled = dict(type='bool', choices=BOOLEANS, default=False), + format = dict(choices=['QCOW2', 'RAW', 'VHD', 'OVA'], default=None), + details = dict(default=None), + bits = dict(type='int', choices=[ 32, 64 ], default=64), + state = dict(choices=['present', 'absent'], default='present'), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(type='bool', choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + mutually_exclusive = ( + ['url', 'vm'], + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ['format', 'url', 'hypervisor'], + ), + required_one_of = ( + ['url', 'vm'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_tpl = AnsibleCloudStackTemplate(module) + + state = module.params.get('state') + if state in ['absent']: + tpl = acs_tpl.remove_template() + else: + url = module.params.get('url') + if url: + tpl = acs_tpl.register_template() + else: + 
tpl = acs_tpl.create_template() + + result = acs_tpl.get_result(tpl) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 96d82b4f9ef61aab4e5a340eefbac973883adecb Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 15 Jun 2015 12:12:49 +0200 Subject: [PATCH 552/720] cloudstack: fix clean_up arg to be boolean in cs_network --- cloud/cloudstack/cs_network.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/cloudstack/cs_network.py b/cloud/cloudstack/cs_network.py index c8b3b32539d..e22eaf0a5c3 100644 --- a/cloud/cloudstack/cs_network.py +++ b/cloud/cloudstack/cs_network.py @@ -116,7 +116,7 @@ options: - Cleanup old network elements. - Only considered on C(state=restarted). required: false - default: null + default: false acl_type: description: - Access control type. @@ -584,7 +584,7 @@ def main(): vlan = dict(default=None), vpc = dict(default=None), isolated_pvlan = dict(default=None), - clean_up = dict(default=None), + clean_up = dict(type='bool', choices=BOOLEANS, default=False), network_domain = dict(default=None), state = dict(choices=['present', 'absent', 'restarted' ], default='present'), acl_type = dict(choices=['account', 'domain'], default='account'), From 5c39a5cc197f7874595bb19bdd611a759a07518b Mon Sep 17 00:00:00 2001 From: whiter Date: Wed, 15 Apr 2015 17:45:41 +1000 Subject: [PATCH 553/720] New module - ec2_eni --- cloud/amazon/ec2_eni.py | 404 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 404 insertions(+) create mode 100644 cloud/amazon/ec2_eni.py diff --git a/cloud/amazon/ec2_eni.py b/cloud/amazon/ec2_eni.py new file mode 100644 index 00000000000..2b34e9b9405 --- /dev/null +++ b/cloud/amazon/ec2_eni.py @@ -0,0 +1,404 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_eni +short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance +description: + - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID is provided, an attempt is made to update the existing ENI. By passing 'None' as the instance_id, an ENI can be detached from an instance. +version_added: "2.0" +author: Rob White, wimnat [at] gmail.com, @wimnat +options: + eni_id: + description: + - The ID of the ENI + required = false + default = null + instance_id: + description: + - Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'. + required: false + default: null + private_ip_address: + description: + - Private IP address. + required: false + default: null + subnet_id: + description: + - ID of subnet in which to create the ENI. Only required when state=present. 
+ required: true + description: + description: + - Optional description of the ENI. + required: false + default: null + security_groups: + description: + - List of security groups associated with the interface. Only used when state=present. + required: false + default: null + state: + description: + - Create or delete ENI. + required: false + default: present + choices: [ 'present', 'absent' ] + device_index: + description: + - The index of the device for the network interface attachment on the instance. + required: false + default: 0 + force_detach: + description: + - Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent. + required: false + default: no + delete_on_termination: + description: + - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation. + required: false + source_dest_check: + description: + - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation. + required: false +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Create an ENI. As no security group is defined, ENI will be created in default security group +- ec2_eni: + private_ip_address: 172.31.0.20 + subnet_id: subnet-xxxxxxxx + state: present + +# Create an ENI and attach it to an instance +- ec2_eni: + instance_id: i-xxxxxxx + device_index: 1 + private_ip_address: 172.31.0.20 + subnet_id: subnet-xxxxxxxx + state: present + +# Destroy an ENI, detaching it from any instance if necessary +- ec2_eni: + eni_id: eni-xxxxxxx + force_detach: yes + state: absent + +# Update an ENI +- ec2_eni: + eni_id: eni-xxxxxxx + description: "My new description" + state: present + +# Detach an ENI from an instance +- ec2_eni: + eni_id: eni-xxxxxxx + instance_id: None + state: present + +### Delete an interface on termination +# First create the interface +- ec2_eni: + instance_id: i-xxxxxxx + device_index: 1 + private_ip_address: 172.31.0.20 + subnet_id: subnet-xxxxxxxx + state: present + register: eni + +# Modify the interface to enable the delete_on_terminaton flag +- ec2_eni: + eni_id: {{ "eni.interface.id" }} + delete_on_termination: true + +''' + +import time +import xml.etree.ElementTree as ET +import re + +try: + import boto.ec2 + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +def get_error_message(xml_string): + + root = ET.fromstring(xml_string) + for message in root.findall('.//Message'): + return message.text + + +def get_eni_info(interface): + + interface_info = {'id': interface.id, + 'subnet_id': interface.subnet_id, + 'vpc_id': interface.vpc_id, + 'description': interface.description, + 'owner_id': interface.owner_id, + 'status': interface.status, + 'mac_address': interface.mac_address, + 'private_ip_address': interface.private_ip_address, + 'source_dest_check': interface.source_dest_check, + 'groups': dict((group.id, group.name) for group in interface.groups), + } + + if interface.attachment is not None: + interface_info['attachment'] = {'attachment_id': interface.attachment.id, + 'instance_id': interface.attachment.instance_id, + 'device_index': interface.attachment.device_index, + 'status': 
interface.attachment.status, + 'attach_time': interface.attachment.attach_time, + 'delete_on_termination': interface.attachment.delete_on_termination, + } + + return interface_info + +def wait_for_eni(eni, status): + + while True: + time.sleep(3) + eni.update() + # If the status is detached we just need attachment to disappear + if eni.attachment is None: + if status == "detached": + break + else: + if status == "attached" and eni.attachment.status == "attached": + break + + +def create_eni(connection, module): + + instance_id = module.params.get("instance_id") + if instance_id == 'None': + instance_id = None + do_detach = True + else: + do_detach = False + device_index = module.params.get("device_index") + subnet_id = module.params.get('subnet_id') + private_ip_address = module.params.get('private_ip_address') + description = module.params.get('description') + security_groups = module.params.get('security_groups') + changed = False + + try: + eni = compare_eni(connection, module) + if eni is None: + eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups) + if instance_id is not None: + try: + eni.attach(instance_id, device_index) + except BotoServerError as ex: + eni.delete() + raise + changed = True + # Wait to allow creation / attachment to finish + wait_for_eni(eni, "attached") + eni.update() + + except BotoServerError as e: + module.fail_json(msg=get_error_message(e.args[2])) + + module.exit_json(changed=changed, interface=get_eni_info(eni)) + + +def modify_eni(connection, module): + + eni_id = module.params.get("eni_id") + instance_id = module.params.get("instance_id") + if instance_id == 'None': + instance_id = None + do_detach = True + else: + do_detach = False + device_index = module.params.get("device_index") + subnet_id = module.params.get('subnet_id') + private_ip_address = module.params.get('private_ip_address') + description = module.params.get('description') + security_groups = module.params.get('security_groups') + force_detach = module.params.get("force_detach") + source_dest_check = module.params.get("source_dest_check") + delete_on_termination = module.params.get("delete_on_termination") + changed = False + + + try: + # Get the eni with the eni_id specified + eni_result_set = connection.get_all_network_interfaces(eni_id) + eni = eni_result_set[0] + if description is not None: + if eni.description != description: + connection.modify_network_interface_attribute(eni.id, "description", description) + changed = True + if security_groups is not None: + if sorted(get_sec_group_list(eni.groups)) != sorted(security_groups): + connection.modify_network_interface_attribute(eni.id, "groupSet", security_groups) + changed = True + if source_dest_check is not None: + if eni.source_dest_check != source_dest_check: + connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check) + changed = True + if delete_on_termination is not None: + if eni.attachment is not None: + if eni.attachment.delete_on_termination is not delete_on_termination: + connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id) + changed = True + else: + module.fail_json(msg="Can not modify delete_on_termination as the interface is not attached") + if eni.attachment is not None and instance_id is None and do_detach is True: + eni.detach(force_detach) + wait_for_eni(eni, "detached") + changed = True + else: + if instance_id is not None: + eni.attach(instance_id, device_index) + 
wait_for_eni(eni, "attached") + changed = True + + except BotoServerError as e: + print e + module.fail_json(msg=get_error_message(e.args[2])) + + eni.update() + module.exit_json(changed=changed, interface=get_eni_info(eni)) + + +def delete_eni(connection, module): + + eni_id = module.params.get("eni_id") + force_detach = module.params.get("force_detach") + + try: + eni_result_set = connection.get_all_network_interfaces(eni_id) + eni = eni_result_set[0] + + if force_detach is True: + if eni.attachment is not None: + eni.detach(force_detach) + # Wait to allow detachment to finish + wait_for_eni(eni, "detached") + eni.update() + eni.delete() + changed = True + else: + eni.delete() + changed = True + + module.exit_json(changed=changed) + except BotoServerError as e: + msg = get_error_message(e.args[2]) + regex = re.compile('The networkInterface ID \'.*\' does not exist') + if regex.search(msg) is not None: + module.exit_json(changed=False) + else: + module.fail_json(msg=get_error_message(e.args[2])) + +def compare_eni(connection, module): + + eni_id = module.params.get("eni_id") + subnet_id = module.params.get('subnet_id') + private_ip_address = module.params.get('private_ip_address') + description = module.params.get('description') + security_groups = module.params.get('security_groups') + + try: + all_eni = connection.get_all_network_interfaces(eni_id) + + for eni in all_eni: + remote_security_groups = get_sec_group_list(eni.groups) + if (eni.subnet_id == subnet_id) and (eni.private_ip_address == private_ip_address) and (eni.description == description) and (remote_security_groups == security_groups): + return eni + + except BotoServerError as e: + module.fail_json(msg=get_error_message(e.args[2])) + + return None + +def get_sec_group_list(groups): + + # Build list of remote security groups + remote_security_groups = [] + for group in groups: + remote_security_groups.append(group.id.encode()) + + return remote_security_groups + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + eni_id = dict(default=None), + instance_id = dict(default=None), + private_ip_address = dict(), + subnet_id = dict(), + description = dict(), + security_groups = dict(type='list'), + device_index = dict(default=0, type='int'), + state = dict(default='present', choices=['present', 'absent']), + force_detach = dict(default='no', type='bool'), + source_dest_check = dict(default=None, type='bool'), + delete_on_termination = dict(default=None, type='bool') + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.ec2, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + state = module.params.get("state") + eni_id = module.params.get("eni_id") + + if state == 'present': + if eni_id is None: + if module.params.get("subnet_id") is None: + module.fail_json(msg="subnet_id must be specified when state=present") + create_eni(connection, module) + else: + modify_eni(connection, module) + elif state == 'absent': + if eni_id is None: + module.fail_json(msg="eni_id must be specified") + else: + delete_eni(connection, module) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +# this is magic, see 
lib/ansible/module_common.py +#<> + +main() From 8311854fa6a93b10da38c83ad5d62269337e5feb Mon Sep 17 00:00:00 2001 From: whiter Date: Tue, 16 Jun 2015 12:21:37 +1000 Subject: [PATCH 554/720] New module - ec2_eni_facts --- cloud/amazon/ec2_eni_facts.py | 135 ++++++++++++++++++++++++++++++++++ 1 file changed, 135 insertions(+) create mode 100644 cloud/amazon/ec2_eni_facts.py diff --git a/cloud/amazon/ec2_eni_facts.py b/cloud/amazon/ec2_eni_facts.py new file mode 100644 index 00000000000..94b586fb639 --- /dev/null +++ b/cloud/amazon/ec2_eni_facts.py @@ -0,0 +1,135 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_eni_facts +short_description: Gather facts about ec2 ENI interfaces in AWS +description: + - Gather facts about ec2 ENI interfaces in AWS +version_added: "2.0" +author: Rob White, wimnat [at] gmail.com, @wimnat +options: + eni_id: + description: + - The ID of the ENI. Pass this option to gather facts about a particular ENI, otherwise, all ENIs are returned. + required: false + default: null +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details.
+ +# Gather facts about all ENIs +- ec2_eni_facts: + +# Gather facts about a particular ENI +- ec2_eni_facts: + eni_id: eni-xxxxxxx + +''' + +import xml.etree.ElementTree as ET + +try: + import boto.ec2 + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +def get_error_message(xml_string): + + root = ET.fromstring(xml_string) + for message in root.findall('.//Message'): + return message.text + + +def get_eni_info(interface): + + interface_info = {'id': interface.id, + 'subnet_id': interface.subnet_id, + 'vpc_id': interface.vpc_id, + 'description': interface.description, + 'owner_id': interface.owner_id, + 'status': interface.status, + 'mac_address': interface.mac_address, + 'private_ip_address': interface.private_ip_address, + 'source_dest_check': interface.source_dest_check, + 'groups': dict((group.id, group.name) for group in interface.groups), + } + + if interface.attachment is not None: + interface_info['attachment'] = {'attachment_id': interface.attachment.id, + 'instance_id': interface.attachment.instance_id, + 'device_index': interface.attachment.device_index, + 'status': interface.attachment.status, + 'attach_time': interface.attachment.attach_time, + 'delete_on_termination': interface.attachment.delete_on_termination, + } + + return interface_info + + +def list_eni(connection, module): + + eni_id = module.params.get("eni_id") + interface_dict_array = [] + + try: + all_eni = connection.get_all_network_interfaces(eni_id) + except BotoServerError as e: + module.fail_json(msg=get_error_message(e.args[2])) + + for interface in all_eni: + interface_dict_array.append(get_eni_info(interface)) + + module.exit_json(interfaces=interface_dict_array) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + eni_id = dict(default=None) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.ec2, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + list_eni(connection, module) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +# this is magic, see lib/ansible/module_common.py +#<> + +main() From 8829b818b8a1c603364f5af548705625fc9af718 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20Gr=C3=B6ning?= Date: Fri, 7 Nov 2014 14:14:12 +0100 Subject: [PATCH 555/720] add function for servicegrup downtimes --- monitoring/nagios.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 5744fb28875..5b14b331624 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -169,6 +169,7 @@ def main(): 'silence_nagios', 'unsilence_nagios', 'command', + 'servicegroup_downtime' ] module = AnsibleModule( @@ -176,6 +177,7 @@ def main(): action=dict(required=True, default=None, choices=ACTION_CHOICES), author=dict(default='Ansible'), host=dict(required=False, default=None), + servicegroup=dict(required=False, default=None), minutes=dict(default=30), cmdfile=dict(default=which_cmdfile()), services=dict(default=None, aliases=['service']), @@ -185,6 +187,7 @@ def main(): action = module.params['action'] host = module.params['host'] + servicegroup = 
module.params['servicegroup'] minutes = module.params['minutes'] services = module.params['services'] cmdfile = module.params['cmdfile'] @@ -201,7 +204,7 @@ def main(): # 'minutes' and 'service' manually. ################################################################## - if action not in ['command', 'silence_nagios', 'unsilence_nagios']: + if action not in ['command', 'silence_nagios', 'unsilence_nagios', 'servicegroup_downtime']: if not host: module.fail_json(msg='no host specified for action requiring one') ###################################################################### @@ -217,6 +220,20 @@ def main(): except Exception: module.fail_json(msg='invalid entry for minutes') + ###################################################################### + + if action == 'servicegroup_downtime': + # Make sure there's an actual service selected + if not servicegroup: + module.fail_json(msg='no servicegroup selected to set downtime for') + # Make sure minutes is a number + try: + m = int(minutes) + if not isinstance(m, types.IntType): + module.fail_json(msg='minutes must be a number') + except Exception: + module.fail_json(msg='invalid entry for minutes') + ################################################################## if action in ['enable_alerts', 'disable_alerts']: if not services: @@ -259,6 +276,7 @@ class Nagios(object): self.action = kwargs['action'] self.author = kwargs['author'] self.host = kwargs['host'] + self.service_group = kwargs['servicegroup'] self.minutes = int(kwargs['minutes']) self.cmdfile = kwargs['cmdfile'] self.command = kwargs['command'] @@ -847,6 +865,9 @@ class Nagios(object): self.schedule_svc_downtime(self.host, services=self.services, minutes=self.minutes) + if self.action == "servicegroup_downtime": + if self.services == 'servicegroup': + self.schedule_servicegroup_host_downtime(self, self.servicegroup, minutes=30) # toggle the host AND service alerts elif self.action == 'silence': From 0b9863ed0e903b0da5b8ad9d548d4a96cbcd2ea3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20Gr=C3=B6ning?= Date: Fri, 7 Nov 2014 14:36:04 +0100 Subject: [PATCH 556/720] divided between host an service downtimes --- monitoring/nagios.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 5b14b331624..510ca720fd7 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -33,7 +33,8 @@ options: required: true default: null choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", - "silence_nagios", "unsilence_nagios", "command" ] + "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime", + "servicegroup_host_downtime" ] host: description: - Host to operate on in Nagios. 
@@ -90,6 +91,12 @@ EXAMPLES = ''' # schedule downtime for a few services - nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }} +# set 30 minutes downtime for all services in servicegroup foo +- nagios: action=servicegroup_service_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }} + +# set 30 minutes downtime for all hosts in servicegroup foo +- nagios: action=servicegroup_host_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }} + # enable SMART disk alerts - nagios: action=enable_alerts service=smart host={{ inventory_hostname }} @@ -169,9 +176,11 @@ def main(): 'silence_nagios', 'unsilence_nagios', 'command', - 'servicegroup_downtime' + 'servicegroup_host_downtime', + 'servicegroup_service_downtime', ] + module = AnsibleModule( argument_spec=dict( action=dict(required=True, default=None, choices=ACTION_CHOICES), @@ -222,8 +231,8 @@ def main(): ###################################################################### - if action == 'servicegroup_downtime': - # Make sure there's an actual service selected + if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']: + # Make sure there's an actual servicegroup selected if not servicegroup: module.fail_json(msg='no servicegroup selected to set downtime for') # Make sure minutes is a number @@ -865,7 +874,10 @@ class Nagios(object): self.schedule_svc_downtime(self.host, services=self.services, minutes=self.minutes) - if self.action == "servicegroup_downtime": + elif self.action == "servicegroup_host_downtime": + if self.services == 'servicegroup': + self.schedule_servicegroup_host_downtime(self, self.servicegroup, minutes=30) + elif self.action == "servicegroup_service_downtime": + if self.services == 'servicegroup': + self.schedule_servicegroup_host_downtime(self, self.servicegroup, minutes=30) From 304abbce854b81e071334f973be10e4453a004ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20Gr=C3=B6ning?= Date: Fri, 7 Nov 2014 15:00:57 +0100 Subject: [PATCH 557/720] improved docs --- monitoring/nagios.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 510ca720fd7..4fb44ea0089 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -66,6 +66,10 @@ options: aliases: [ "service" ] required: true default: null + servicegroup: + description: + - the Servicegroup we want to set downtimes/alerts for. + B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime). command: description: - The raw command to send to nagios, which From f9041a1b29f115791cc244bda10c5fad7976e0a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20Gr=C3=B6ning?= Date: Fri, 7 Nov 2014 17:16:48 +0100 Subject: [PATCH 558/720] fix bugs --- monitoring/nagios.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 4fb44ea0089..7177ffd2f43 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -217,7 +217,7 @@ def main(): # 'minutes' and 'service' manually.
################################################################## - if action not in ['command', 'silence_nagios', 'unsilence_nagios', 'servicegroup_downtime']: + if action not in ['command', 'silence_nagios', 'unsilence_nagios']: if not host: module.fail_json(msg='no host specified for action requiring one') ###################################################################### @@ -289,7 +289,7 @@ class Nagios(object): self.action = kwargs['action'] self.author = kwargs['author'] self.host = kwargs['host'] - self.service_group = kwargs['servicegroup'] + self.servicegroup = kwargs['servicegroup'] self.minutes = int(kwargs['minutes']) self.cmdfile = kwargs['cmdfile'] self.command = kwargs['command'] @@ -879,11 +879,11 @@ class Nagios(object): services=self.services, minutes=self.minutes) elif self.action == "servicegroup_host_downtime": - if self.services == 'servicegroup': - self.schedule_servicegroup_host_downtime(self, self.servicegroup, minutes=30) + if self.servicegroup: + self.schedule_servicegroup_host_downtime(servicegroup = self.servicegroup, minutes = self.minutes) elif self.action == "servicegroup_service_downtime": - if self.services == 'servicegroup': - self.schedule_servicegroup_host_downtime(self, self.servicegroup, minutes=30) + if self.servicegroup: + self.schedule_servicegroup_svc_downtime(servicegroup = self.servicegroup, minutes = self.minutes) # toggle the host AND service alerts elif self.action == 'silence': From bc440ade79115acd83e58f01e7e7e737d430efd2 Mon Sep 17 00:00:00 2001 From: Nicolas Brisac Date: Fri, 14 Nov 2014 17:09:24 +0100 Subject: [PATCH 559/720] Allow filtering of routed/forwarded packets MAN page states the following : Rules for traffic not destined for the host itself but instead for traffic that should be routed/forwarded through the firewall should specify the route keyword before the rule (routing rules differ significantly from PF syntax and instead take into account netfilter FORWARD chain conventions). For example: ufw route allow in on eth1 out on eth2 This commit introduces a new parameter "route=yes/no" to allow just that. --- system/ufw.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/system/ufw.py b/system/ufw.py index 3694f2b937a..91d574f945d 100644 --- a/system/ufw.py +++ b/system/ufw.py @@ -116,6 +116,11 @@ options: - Specify interface for rule. required: false aliases: ['if'] + route: + description: + - Apply the rule to routed/forwarded packets. + required: false + choices: ['yes', 'no'] ''' EXAMPLES = ''' @@ -165,6 +170,10 @@ ufw: rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 # Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host. # Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work. ufw: rule=deny proto=tcp src=2001:db8::/32 port=25 + +# Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24. 
+# Can be used to further restrict a global FORWARD policy set to allow +ufw: rule=deny route=yes src=1.2.3.0/24 dest=4.5.6.0/24 ''' from operator import itemgetter @@ -178,6 +187,7 @@ def main(): logging = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']), direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing', 'routed']), delete = dict(default=False, type='bool'), + route = dict(default=False, type='bool'), insert = dict(default=None), rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']), interface = dict(default=None, aliases=['if']), @@ -241,10 +251,11 @@ def main(): elif command == 'rule': # Rules are constructed according to the long format # - # ufw [--dry-run] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ + # ufw [--dry-run] [delete] [insert NUM] [route] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \ # [proto protocol] [app application] cmd.append([module.boolean(params['delete']), 'delete']) + cmd.append([module.boolean(params['route']), 'route']) cmd.append([params['insert'], "insert %s" % params['insert']]) cmd.append([value]) cmd.append([module.boolean(params['log']), 'log']) From b80d2b3cfaf3e166c0de06802ea328069d365910 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 25 Nov 2014 15:50:27 -0600 Subject: [PATCH 560/720] Adding VERSION file for 1.8.0 --- VERSION | 1 + 1 file changed, 1 insertion(+) create mode 100644 VERSION diff --git a/VERSION b/VERSION new file mode 100644 index 00000000000..27f9cd322bb --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.8.0 From 9859aa0435faeec49a5b31f0b7275ef868e95597 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 26 Nov 2014 21:32:16 -0600 Subject: [PATCH 561/720] Version bump for extras release 1.8.1 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 27f9cd322bb..a8fdfda1c78 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.8.0 +1.8.1 From 45423973fc23a2f184d2f871f16119db5c5102ff Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 4 Dec 2014 15:50:48 -0600 Subject: [PATCH 562/720] Version bump for 1.8.2 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index a8fdfda1c78..53adb84c822 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.8.1 +1.8.2 From 8383857b5a89f5745268ef9ad38fe2e833604e63 Mon Sep 17 00:00:00 2001 From: Jason Holland Date: Tue, 25 Nov 2014 14:43:47 -0600 Subject: [PATCH 563/720] Fix some logical issues with enabling/disabling a server on the A10. 
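
The gist of the fix, as a rough standalone sketch rather than the module code itself (plain Python with hypothetical sample values): only put optional fields into the request body when they were actually supplied, and treat a difference between the device's current status and the requested status as a reason to update.

# Rough sketch (not the a10_server module itself): build the request body
# only from supplied optional parameters, and decide whether an update is
# needed by comparing statuses. Sample values below are hypothetical.

def build_server_body(name, ip=None, ports=None, status=None):
    """Include optional fields only when the caller provided them."""
    body = {'server': {'name': name}}
    if ip:
        body['server']['host'] = ip
    if ports:
        body['server']['port_list'] = ports
    if status is not None:
        body['server']['status'] = status
    return body


def status_needs_update(current_status, new_status):
    """True when the current device status differs from the desired one."""
    return current_status != new_status


if __name__ == '__main__':
    current = {'server': {'name': 'web01', 'status': 0}}           # as reported by the device
    desired = build_server_body('web01', ip='10.0.0.5', status=1)  # as requested by the task
    print(build_server_body('web01'))                               # name only, nothing else sent
    print(status_needs_update(current['server']['status'],
                              desired['server']['status']))         # True -> update required

The patch below applies the same idea inside main(), sending the resulting body through axapi_call() and saving the configuration when something changed or a save was requested.
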
--- network/a10/a10_server.py | 51 +++++++++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 15 deletions(-) diff --git a/network/a10/a10_server.py b/network/a10/a10_server.py index 2d7b8cc5d9c..6714366f1b1 100644 --- a/network/a10/a10_server.py +++ b/network/a10/a10_server.py @@ -183,28 +183,35 @@ def main(): json_post = { 'server': { - 'name': slb_server, - 'host': slb_server_ip, - 'status': axapi_enabled_disabled(slb_server_status), - 'port_list': slb_server_ports, + 'name': slb_server, } } + # add optional module parameters + if slb_server_ip: + json_post['server']['host'] = slb_server_ip + + if slb_server_ports: + json_post['server']['port_list'] = slb_server_ports + + if slb_server_status: + json_post['server']['status'] = axapi_enabled_disabled(slb_server_status) + slb_server_data = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server})) slb_server_exists = not axapi_failure(slb_server_data) changed = False if state == 'present': - if not slb_server_ip: - module.fail_json(msg='you must specify an IP address when creating a server') - if not slb_server_exists: + if not slb_server_ip: + module.fail_json(msg='you must specify an IP address when creating a server') + result = axapi_call(module, session_url + '&method=slb.server.create', json.dumps(json_post)) if axapi_failure(result): module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg']) changed = True else: - def needs_update(src_ports, dst_ports): + def port_needs_update(src_ports, dst_ports): ''' Checks to determine if the port definitions of the src_ports array are in or different from those in dst_ports. If there is @@ -227,12 +234,26 @@ def main(): # every port from the src exists in the dst, and none of them were different return False + def status_needs_update(current_status, new_status): + ''' + Check to determine if we want to change the status of a server. + If there is a difference between the current status of the server and + the desired status, return true, otherwise false. 
+ ''' + if current_status != new_status: + return True + return False + defined_ports = slb_server_data.get('server', {}).get('port_list', []) + current_status = slb_server_data.get('server', {}).get('status') - # we check for a needed update both ways, in case ports - # are missing from either the ones specified by the user - # or from those on the device - if needs_update(defined_ports, slb_server_ports) or needs_update(slb_server_ports, defined_ports): + # we check for a needed update several ways + # - in case ports are missing from the ones specified by the user + # - in case ports are missing from those on the device + # - in case we are change the status of a server + if port_needs_update(defined_ports, slb_server_ports) + or port_needs_update(slb_server_ports, defined_ports) + or status_needs_update(current_status, axapi_enabled_disabled(slb_server_status)): result = axapi_call(module, session_url + '&method=slb.server.update', json.dumps(json_post)) if axapi_failure(result): module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg']) @@ -249,10 +270,10 @@ def main(): result = axapi_call(module, session_url + '&method=slb.server.delete', json.dumps({'name': slb_server})) changed = True else: - result = dict(msg="the server was not present") + result = dict(msg="the server was not present") - # if the config has changed, save the config unless otherwise requested - if changed and write_config: + # if the config has changed, or we want to force a save, save the config unless otherwise requested + if changed or write_config: write_result = axapi_call(module, session_url + '&method=system.action.write_memory') if axapi_failure(write_result): module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg']) From 669316195f3dc44e7fcbc24636065a32b889bf04 Mon Sep 17 00:00:00 2001 From: Jason Holland Date: Thu, 4 Dec 2014 16:15:23 -0600 Subject: [PATCH 564/720] Fix small issue with wrapping syntax --- network/a10/a10_server.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/network/a10/a10_server.py b/network/a10/a10_server.py index 6714366f1b1..72ed0f648e6 100644 --- a/network/a10/a10_server.py +++ b/network/a10/a10_server.py @@ -251,9 +251,7 @@ def main(): # - in case ports are missing from the ones specified by the user # - in case ports are missing from those on the device # - in case we are change the status of a server - if port_needs_update(defined_ports, slb_server_ports) - or port_needs_update(slb_server_ports, defined_ports) - or status_needs_update(current_status, axapi_enabled_disabled(slb_server_status)): + if port_needs_update(defined_ports, slb_server_ports) or port_needs_update(slb_server_ports, defined_ports) or status_needs_update(current_status, axapi_enabled_disabled(slb_server_status)): result = axapi_call(module, session_url + '&method=slb.server.update', json.dumps(json_post)) if axapi_failure(result): module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg']) From 2be58620e26f456ea2aa4594b6aafddd299e2390 Mon Sep 17 00:00:00 2001 From: Giovanni Tirloni Date: Thu, 22 Jan 2015 09:13:12 -0500 Subject: [PATCH 565/720] add createparent option to zfs create --- system/zfs.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/system/zfs.py b/system/zfs.py index fed17b4a18d..503ca7d09ef 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -250,7 +250,7 @@ class Zfs(object): if self.module.check_mode: self.changed = True return - 
properties=self.properties + properties = self.properties volsize = properties.pop('volsize', None) volblocksize = properties.pop('volblocksize', None) if "@" in self.name: @@ -260,6 +260,10 @@ class Zfs(object): cmd = [self.module.get_bin_path('zfs', True)] cmd.append(action) + + if createparent: + cmd.append('-p') + if volblocksize: cmd.append('-b %s' % volblocksize) if properties: @@ -271,7 +275,7 @@ class Zfs(object): cmd.append(self.name) (rc, err, out) = self.module.run_command(' '.join(cmd)) if rc == 0: - self.changed=True + self.changed = True else: self.module.fail_json(msg=out) @@ -345,6 +349,7 @@ def main(): 'checksum': {'required': False, 'choices':['on', 'off', 'fletcher2', 'fletcher4', 'sha256']}, 'compression': {'required': False, 'choices':['on', 'off', 'lzjb', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lz4', 'zle']}, 'copies': {'required': False, 'choices':['1', '2', '3']}, + 'createparent': {'required': False, 'choices':['on', 'off']}, 'dedup': {'required': False, 'choices':['on', 'off']}, 'devices': {'required': False, 'choices':['on', 'off']}, 'exec': {'required': False, 'choices':['on', 'off']}, @@ -396,7 +401,7 @@ def main(): result['name'] = name result['state'] = state - zfs=Zfs(module, name, properties) + zfs = Zfs(module, name, properties) if state == 'present': if zfs.exists(): From 3d2f19c24d8dff48410964acd0703925f3102bd1 Mon Sep 17 00:00:00 2001 From: Matthew Landauer Date: Tue, 17 Feb 2015 16:56:15 +1100 Subject: [PATCH 566/720] Fix display of error message It was crashing due to "domain" variable not being defined --- network/dnsmadeeasy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index dc70d0e5569..9fd840f1992 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -292,7 +292,7 @@ def main(): if not "value" in new_record: if not current_record: module.fail_json( - msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, domain)) + msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain'])) module.exit_json(changed=False, result=current_record) # create record as it does not exist From bdeb0bc8db6511d6b6ff886819a37e40ab6ef056 Mon Sep 17 00:00:00 2001 From: Matthew Landauer Date: Tue, 17 Feb 2015 17:13:27 +1100 Subject: [PATCH 567/720] If record_value="" write empty value to dns made easy This is necessary for instance when setting CNAMEs that point to the root of the domain. This is different than leaving record_value out completely which has the same behaviour as before --- network/dnsmadeeasy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index 9fd840f1992..4cb6d7d96a1 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -275,7 +275,7 @@ def main(): current_record = DME.getRecordByName(record_name) new_record = {'name': record_name} for i in ["record_value", "record_type", "record_ttl"]: - if module.params[i]: + if not module.params[i] is None: new_record[i[len("record_"):]] = module.params[i] # Compare new record against existing one From 5ef2dd8a7710944abbae38ca799096808dd5fc50 Mon Sep 17 00:00:00 2001 From: Matthew Landauer Date: Wed, 18 Feb 2015 10:42:07 +1100 Subject: [PATCH 568/720] If record_name="" write empty value to dns made easy This is necessary for instance when setting MX records on the root of a domain. 
This is different than leaving record_name out completely which has the same behaviour as before --- network/dnsmadeeasy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index 4cb6d7d96a1..46d6769f951 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -264,7 +264,7 @@ def main(): record_name = module.params["record_name"] # Follow Keyword Controlled Behavior - if not record_name: + if record_name is None: domain_records = DME.getRecords() if not domain_records: module.fail_json( From b0992a97efe81ff45ad1b789d0951e4a98a95d70 Mon Sep 17 00:00:00 2001 From: Matthew Landauer Date: Wed, 18 Feb 2015 12:14:58 +1100 Subject: [PATCH 569/720] Handle MX,NS,TXT records correctly and don't assume one record type per name --- network/dnsmadeeasy.py | 41 +++++++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index 46d6769f951..fcc7232a0da 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -134,6 +134,7 @@ class DME2: self.domain_map = None # ["domain_name"] => ID self.record_map = None # ["record_name"] => ID self.records = None # ["record_ID"] => + self.all_records = None # Lookup the domain ID if passed as a domain name vs. ID if not self.domain.isdigit(): @@ -191,11 +192,33 @@ class DME2: return self.records.get(record_id, False) - def getRecordByName(self, record_name): - if not self.record_map: - self._instMap('record') - - return self.getRecord(self.record_map.get(record_name, 0)) + # Try to find a single record matching this one. + # How we do this depends on the type of record. For instance, there + # can be several MX records for a single record_name while there can + # only be a single CNAME for a particular record_name. Note also that + # there can be several records with different types for a single name. 
+ def getMatchingRecord(self, record_name, record_type, record_value): + # Get all the records if not already cached + if not self.all_records: + self.all_records = self.getRecords() + + # TODO SRV type not yet implemented + if record_type in ["A", "AAAA", "CNAME", "HTTPRED", "PTR"]: + for result in self.all_records: + if result['name'] == record_name and result['type'] == record_type: + return result + return False + elif record_type in ["MX", "NS", "TXT"]: + for result in self.all_records: + if record_type == "MX": + value = record_value.split(" ")[1] + else: + value = record_value + if result['name'] == record_name and result['type'] == record_type and result['value'] == value: + return result + return False + else: + raise Exception('record_type not yet supported') def getRecords(self): return self.query(self.record_url, 'GET')['data'] @@ -262,6 +285,8 @@ def main(): "account_secret"], module.params["domain"], module) state = module.params["state"] record_name = module.params["record_name"] + record_type = module.params["record_type"] + record_value = module.params["record_value"] # Follow Keyword Controlled Behavior if record_name is None: @@ -272,11 +297,15 @@ def main(): module.exit_json(changed=False, result=domain_records) # Fetch existing record + Build new one - current_record = DME.getRecordByName(record_name) + current_record = DME.getMatchingRecord(record_name, record_type, record_value) new_record = {'name': record_name} for i in ["record_value", "record_type", "record_ttl"]: if not module.params[i] is None: new_record[i[len("record_"):]] = module.params[i] + # Special handling for mx record + if new_record["type"] == "MX": + new_record["mxLevel"] = new_record["value"].split(" ")[0] + new_record["value"] = new_record["value"].split(" ")[1] # Compare new record against existing one changed = False From 49ab501be482dd1417dac7c58e7af1297975b55b Mon Sep 17 00:00:00 2001 From: Kevin Klinemeier Date: Sun, 15 Mar 2015 21:42:35 -0700 Subject: [PATCH 570/720] Updated tags example to an actual datadog tag --- monitoring/datadog_event.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index 1d6a98dc9c3..5319fcb0f1b 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -71,7 +71,7 @@ datadog_event: title="Testing from ansible" text="Test!" priority="low" # Post an event with several tags datadog_event: title="Testing from ansible" text="Test!" api_key="6873258723457823548234234234" - tags=aa,bb,cc + tags=aa,bb,#host:{{ inventory_hostname }} ''' import socket From d604f5616230d88af786a59d63e5a0a5f539b585 Mon Sep 17 00:00:00 2001 From: Todd Zullinger Date: Wed, 18 Mar 2015 15:07:56 -0400 Subject: [PATCH 571/720] monitoring/nagios: Allow comment to be specified The default remains 'Scheduling downtime' but can be overridden. --- monitoring/nagios.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 7177ffd2f43..5fd51d17123 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -52,6 +52,11 @@ options: Only usable with the C(downtime) action. required: false default: Ansible + comment: + description: + - Comment for C(downtime) action. + required: false + default: Scheduling downtime minutes: description: - Minutes to schedule downtime for. 
@@ -89,6 +94,10 @@ EXAMPLES = ''' # schedule an hour of HOST downtime - nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} +# schedule an hour of HOST downtime, with a comment describing the reason +- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} + comment='This host needs disciplined' + # schedule downtime for ALL services on HOST - nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }} @@ -189,6 +198,7 @@ def main(): argument_spec=dict( action=dict(required=True, default=None, choices=ACTION_CHOICES), author=dict(default='Ansible'), + comment=dict(default='Scheduling downtime'), host=dict(required=False, default=None), servicegroup=dict(required=False, default=None), minutes=dict(default=30), @@ -288,6 +298,7 @@ class Nagios(object): self.module = module self.action = kwargs['action'] self.author = kwargs['author'] + self.comment = kwargs['comment'] self.host = kwargs['host'] self.servicegroup = kwargs['servicegroup'] self.minutes = int(kwargs['minutes']) @@ -324,7 +335,7 @@ class Nagios(object): cmdfile=self.cmdfile) def _fmt_dt_str(self, cmd, host, duration, author=None, - comment="Scheduling downtime", start=None, + comment=None, start=None, svc=None, fixed=1, trigger=0): """ Format an external-command downtime string. @@ -357,6 +368,9 @@ class Nagios(object): if not author: author = self.author + if not comment: + comment = self.comment + if svc is not None: dt_args = [svc, str(start), str(end), str(fixed), str(trigger), str(duration_s), author, comment] From 3ea8ac0e13f67d1405057635b64798f5e80f5477 Mon Sep 17 00:00:00 2001 From: Solomon Gifford Date: Tue, 31 Mar 2015 16:43:40 -0400 Subject: [PATCH 572/720] \login_password with missing login_user not caught #363 --- database/misc/mongodb_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index 83a3395216e..ab690f883a8 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -222,7 +222,7 @@ def main(): if mongocnf_creds is not False: login_user = mongocnf_creds['user'] login_password = mongocnf_creds['password'] - elif login_password is None and login_user is not None: + elif login_password is None or login_user is None: module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') if login_user is not None and login_password is not None: From 62a7742481ee13a2e7bf421ac7052b6c3bae19c9 Mon Sep 17 00:00:00 2001 From: Solomon Gifford Date: Thu, 9 Apr 2015 14:03:14 -0400 Subject: [PATCH 573/720] fixes issue #362 --- database/misc/mongodb_user.py | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index ab690f883a8..907aeadc802 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -134,7 +134,15 @@ else: # MongoDB module specific support methods. 
# +def user_find(client, user): + for mongo_user in client["admin"].system.users.find(): + if mongo_user['user'] == user: + return mongo_user + return False + def user_add(module, client, db_name, user, password, roles): + #pymono's user_add is a _create_or_update_user so we won't know if it was changed or updated + #without reproducing a lot of the logic in database.py of pymongo db = client[db_name] if roles is None: db.add_user(user, password, False) @@ -147,9 +155,13 @@ def user_add(module, client, db_name, user, password, roles): err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)' module.fail_json(msg=err_msg) -def user_remove(client, db_name, user): - db = client[db_name] - db.remove_user(user) +def user_remove(module, client, db_name, user): + exists = user_find(client, user) + if exists: + db = client[db_name] + db.remove_user(user) + else: + module.exit_json(changed=False, user=user) def load_mongocnf(): config = ConfigParser.RawConfigParser() @@ -208,15 +220,6 @@ def main(): else: client = MongoClient(login_host, int(login_port), ssl=ssl) - # try to authenticate as a target user to check if it already exists - try: - client[db_name].authenticate(user, password) - if state == 'present': - module.exit_json(changed=False, user=user) - except OperationFailure: - if state == 'absent': - module.exit_json(changed=False, user=user) - if login_user is None and login_password is None: mongocnf_creds = load_mongocnf() if mongocnf_creds is not False: @@ -227,6 +230,10 @@ def main(): if login_user is not None and login_password is not None: client.admin.authenticate(login_user, login_password) + elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'): + if db_name != "admin": + module.fail_json(msg='The localhost login exception only allows the first admin account to be created') + #else: this has to be the first admin user added except ConnectionFailure, e: module.fail_json(msg='unable to connect to database: %s' % str(e)) @@ -242,7 +249,7 @@ def main(): elif state == 'absent': try: - user_remove(client, db_name, user) + user_remove(module, client, db_name, user) except OperationFailure, e: module.fail_json(msg='Unable to remove user: %s' % str(e)) From d1c68eea9f7a99a41d5e8c630ceff8e4eecb97f3 Mon Sep 17 00:00:00 2001 From: Solomon Gifford Date: Thu, 9 Apr 2015 14:22:24 -0400 Subject: [PATCH 574/720] #364 Added support for update_password=dict(default="always", choices=["always", "on_create"]) --- database/misc/mongodb_user.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index 907aeadc802..9802f890a35 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -87,6 +87,14 @@ options: required: false default: present choices: [ "present", "absent" ] + update_password: + required: false + default: always + choices: ['always', 'on_create'] + version_added: "2.1" + description: + - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users. + notes: - Requires the pymongo Python package on the remote host, version 2.4.2+. This can be installed using pip or the OS package manager. 
@see http://api.mongodb.org/python/current/installation.html @@ -196,6 +204,7 @@ def main(): ssl=dict(default=False), roles=dict(default=None, type='list'), state=dict(default='present', choices=['absent', 'present']), + update_password=dict(default="always", choices=["always", "on_create"]), ) ) @@ -213,6 +222,7 @@ def main(): ssl = module.params['ssl'] roles = module.params['roles'] state = module.params['state'] + update_password = module.params['update_password'] try: if replica_set: @@ -239,8 +249,11 @@ def main(): module.fail_json(msg='unable to connect to database: %s' % str(e)) if state == 'present': - if password is None: - module.fail_json(msg='password parameter required when adding a user') + if password is None and update_password == 'always': + module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create') + + if update_password != 'always' and user_find(client, user): + password = None try: user_add(module, client, db_name, user, password, roles) From 05e0b35a45f9e66dfd996fe6ca3ec118b9b2f2d6 Mon Sep 17 00:00:00 2001 From: Benjamin Albrecht Date: Tue, 14 Apr 2015 20:56:36 +0200 Subject: [PATCH 575/720] Fix possible values for zfs sync property --- system/zfs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/zfs.py b/system/zfs.py index 503ca7d09ef..97a0d6f3dba 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -177,7 +177,7 @@ options: description: - The sync property. required: False - choices: ['on','off'] + choices: ['standard','always','disabled'] utf8only: description: - The utf8only property. @@ -373,7 +373,7 @@ def main(): 'sharenfs': {'required': False}, 'sharesmb': {'required': False}, 'snapdir': {'required': False, 'choices':['hidden', 'visible']}, - 'sync': {'required': False, 'choices':['on', 'off']}, + 'sync': {'required': False, 'choices':['standard', 'always', 'disabled']}, # Not supported #'userquota': {'required': False}, 'utf8only': {'required': False, 'choices':['on', 'off']}, From 02258902f9d948a22fa96a4763a3e5531637e4f9 Mon Sep 17 00:00:00 2001 From: NewGyu Date: Wed, 29 Apr 2015 23:59:16 +0900 Subject: [PATCH 576/720] fix cannot download SNAPSHOT version --- packaging/language/maven_artifact.py | 25 ++++--------------------- 1 file changed, 4 insertions(+), 21 deletions(-) diff --git a/packaging/language/maven_artifact.py b/packaging/language/maven_artifact.py index d6dd33166dc..057cb0a3814 100644 --- a/packaging/language/maven_artifact.py +++ b/packaging/language/maven_artifact.py @@ -184,29 +184,12 @@ class MavenDownloader: if artifact.is_snapshot(): path = "/%s/maven-metadata.xml" % (artifact.path()) xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r)) - basexpath = "/metadata/versioning/" - p = xml.xpath(basexpath + "/snapshotVersions/snapshotVersion") - if p: - return self._find_matching_artifact(p, artifact) + timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0] + buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0] + return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber)) else: return self._uri_for_artifact(artifact) - def _find_matching_artifact(self, elems, artifact): - filtered = filter(lambda e: e.xpath("extension/text() = '%s'" % artifact.extension), elems) - if artifact.classifier: - filtered = filter(lambda e: e.xpath("classifier/text() = '%s'" % artifact.classifier), elems) - - if len(filtered) > 1: - print( - 
"There was more than one match. Selecting the first one. Try adding a classifier to get a better match.") - elif not len(filtered): - print("There were no matches.") - return None - - elem = filtered[0] - value = elem.xpath("value/text()") - return self._uri_for_artifact(artifact, value[0]) - def _uri_for_artifact(self, artifact, version=None): if artifact.is_snapshot() and not version: raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact)) @@ -309,7 +292,7 @@ def main(): repository_url = dict(default=None), username = dict(default=None), password = dict(default=None), - state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state + state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state dest = dict(default=None), ) ) From f605e388273af8cd309a9a26eb28e2fe22bc2e37 Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Sun, 3 May 2015 20:58:21 +0100 Subject: [PATCH 577/720] Add webfaction modules --- cloud/webfaction/__init__.py | 0 cloud/webfaction/webfaction_app.py | 153 ++++++++++++++++++++ cloud/webfaction/webfaction_db.py | 147 +++++++++++++++++++ cloud/webfaction/webfaction_domain.py | 134 ++++++++++++++++++ cloud/webfaction/webfaction_mailbox.py | 112 +++++++++++++++ cloud/webfaction/webfaction_site.py | 189 +++++++++++++++++++++++++ 6 files changed, 735 insertions(+) create mode 100644 cloud/webfaction/__init__.py create mode 100644 cloud/webfaction/webfaction_app.py create mode 100644 cloud/webfaction/webfaction_db.py create mode 100644 cloud/webfaction/webfaction_domain.py create mode 100644 cloud/webfaction/webfaction_mailbox.py create mode 100644 cloud/webfaction/webfaction_site.py diff --git a/cloud/webfaction/__init__.py b/cloud/webfaction/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py new file mode 100644 index 00000000000..b1ddcd5a9c0 --- /dev/null +++ b/cloud/webfaction/webfaction_app.py @@ -0,0 +1,153 @@ +#! /usr/bin/python +# Create a Webfaction application using Ansible and the Webfaction API +# +# Valid application types can be found by looking here: +# http://docs.webfaction.com/xmlrpc-api/apps.html#application-types +# +# Quentin Stafford-Fraser 2015 + +DOCUMENTATION = ''' +--- +module: webfaction_app +short_description: Add or remove applications on a Webfaction host +description: + - Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser +version_added: 1.99 +notes: + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. + +options: + name: + description: + - The name of the application + required: true + default: null + + state: + description: + - Whether the application should exist + required: false + choices: ['present', 'absent'] + default: "present" + + type: + description: + - The type of application to create. See the Webfaction docs at http://docs.webfaction.com/xmlrpc-api/apps.html for a list. 
+ required: true + + autostart: + description: + - Whether the app should restart with an autostart.cgi script + required: false + default: "no" + + extra_info: + description: + - Any extra parameters required by the app + required: false + default: null + + open_port: + required: false + default: false + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +import xmlrpclib +from ansible.module_utils.basic import * + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + state = dict(required=False, default='present'), + type = dict(required=True), + autostart = dict(required=False, choices=BOOLEANS, default='false'), + extra_info = dict(required=False, default=""), + port_open = dict(required=False, default="false"), + login_name = dict(required=True), + login_password = dict(required=True), + ), + supports_check_mode=True + ) + app_name = module.params['name'] + app_type = module.params['type'] + app_state = module.params['state'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + app_list = webfaction.list_apps(session_id) + app_map = dict([(i['name'], i) for i in app_list]) + existing_app = app_map.get(app_name) + + result = {} + + # Here's where the real stuff happens + + if app_state == 'present': + + # Does an app with this name already exist? + if existing_app: + if existing_app['type'] != app_type: + module.fail_json(msg="App already exists with different type. Please fix by hand.") + + # If it exists with the right type, we don't change it + # Should check other parameters. + module.exit_json( + changed = False, + ) + + if not module.check_mode: + # If this isn't a dry run, create the app + result.update( + webfaction.create_app( + session_id, app_name, app_type, + module.boolean(module.params['autostart']), + module.params['extra_info'], + module.boolean(module.params['port_open']) + ) + ) + + elif app_state == 'absent': + + # If the app's already not there, nothing changed. + if not existing_app: + module.exit_json( + changed = False, + ) + + if not module.check_mode: + # If this isn't a dry run, delete the app + result.update( + webfaction.delete_app(session_id, app_name) + ) + + else: + module.fail_json(msg="Unknown state specified: {}".format(app_state)) + + + module.exit_json( + changed = True, + result = result + ) + +# The conventional ending +main() + diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py new file mode 100644 index 00000000000..7205a084ef2 --- /dev/null +++ b/cloud/webfaction/webfaction_db.py @@ -0,0 +1,147 @@ +#! /usr/bin/python +# Create webfaction database using Ansible and the Webfaction API +# +# Quentin Stafford-Fraser 2015 + +DOCUMENTATION = ''' +--- +module: webfaction_db +short_description: Add or remove a database on Webfaction +description: + - Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser +version_added: 1.99 +notes: + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. 
If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. +options: + + name: + description: + - The name of the database + required: true + default: null + + state: + description: + - Whether the database should exist + required: false + choices: ['present', 'absent'] + default: "present" + + type: + description: + - The type of database to create. + required: true + choices: ['mysql', 'postgresql'] + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +EXAMPLES = ''' + # This will also create a default DB user with the same + # name as the database, and the specified password. + + - name: Create a database + webfaction_db: + name: "{{webfaction_user}}_db1" + password: mytestsql + type: mysql + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" +''' + +import socket +import xmlrpclib +from ansible.module_utils.basic import * + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + state = dict(required=False, default='present'), + # You can specify an IP address or hostname. + type = dict(required=True, default=None), + password = dict(required=False, default=None), + login_name = dict(required=True), + login_password = dict(required=True), + ), + supports_check_mode=True + ) + db_name = module.params['name'] + db_state = module.params['state'] + db_type = module.params['type'] + db_passwd = module.params['password'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + db_list = webfaction.list_dbs(session_id) + db_map = dict([(i['name'], i) for i in db_list]) + existing_db = db_map.get(db_name) + + result = {} + + # Here's where the real stuff happens + + if db_state == 'present': + + # Does an app with this name already exist? + if existing_db: + # Yes, but of a different type - fail + if existing_db['db_type'] != db_type: + module.fail_json(msg="Database already exists but is a different type. Please fix by hand.") + + # If it exists with the right type, we don't change anything. + module.exit_json( + changed = False, + ) + + + if not module.check_mode: + # If this isn't a dry run, create the app + # print positional_args + result.update( + webfaction.create_db( + session_id, db_name, db_type, db_passwd + ) + ) + + elif db_state == 'absent': + + # If the app's already not there, nothing changed. + if not existing_db: + module.exit_json( + changed = False, + ) + + if not module.check_mode: + # If this isn't a dry run, delete the app + result.update( + webfaction.delete_db(session_id, db_name, db_type) + ) + + else: + module.fail_json(msg="Unknown state specified: {}".format(db_state)) + + module.exit_json( + changed = True, + result = result + ) + +# The conventional ending +main() + diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py new file mode 100644 index 00000000000..2f3c8542754 --- /dev/null +++ b/cloud/webfaction/webfaction_domain.py @@ -0,0 +1,134 @@ +#! 
/usr/bin/python +# Create Webfaction domains and subdomains using Ansible and the Webfaction API +# +# Quentin Stafford-Fraser 2015 + +DOCUMENTATION = ''' +--- +module: webfaction_domain +short_description: Add or remove domains and subdomains on Webfaction +description: + - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser +version_added: 1.99 +notes: + - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted. + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. + +options: + + name: + description: + - The name of the domain + required: true + default: null + + state: + description: + - Whether the domain should exist + required: false + choices: ['present', 'absent'] + default: "present" + + subdomains: + description: + - Any subdomains to create. + required: false + default: null + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +import socket +import xmlrpclib +from ansible.module_utils.basic import * + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + state = dict(required=False, default='present'), + subdomains = dict(required=False, default=[]), + login_name = dict(required=True), + login_password = dict(required=True), + ), + supports_check_mode=True + ) + domain_name = module.params['name'] + domain_state = module.params['state'] + domain_subdomains = module.params['subdomains'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + domain_list = webfaction.list_domains(session_id) + domain_map = dict([(i['domain'], i) for i in domain_list]) + existing_domain = domain_map.get(domain_name) + + result = {} + + # Here's where the real stuff happens + + if domain_state == 'present': + + # Does an app with this name already exist? + if existing_domain: + + if set(existing_domain['subdomains']) >= set(domain_subdomains): + # If it exists with the right subdomains, we don't change anything. + module.exit_json( + changed = False, + ) + + positional_args = [session_id, domain_name] + domain_subdomains + + if not module.check_mode: + # If this isn't a dry run, create the app + # print positional_args + result.update( + webfaction.create_domain( + *positional_args + ) + ) + + elif domain_state == 'absent': + + # If the app's already not there, nothing changed. 
+ if not existing_domain: + module.exit_json( + changed = False, + ) + + positional_args = [session_id, domain_name] + domain_subdomains + + if not module.check_mode: + # If this isn't a dry run, delete the app + result.update( + webfaction.delete_domain(*positional_args) + ) + + else: + module.fail_json(msg="Unknown state specified: {}".format(domain_state)) + + module.exit_json( + changed = True, + result = result + ) + +# The conventional ending +main() + diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py new file mode 100644 index 00000000000..3ac848d6a94 --- /dev/null +++ b/cloud/webfaction/webfaction_mailbox.py @@ -0,0 +1,112 @@ +#! /usr/bin/python +# Create webfaction mailbox using Ansible and the Webfaction API +# +# Quentin Stafford-Fraser and Andy Baker 2015 + +DOCUMENTATION = ''' +--- +module: webfaction_mailbox +short_description: Add or remove mailboxes on Webfaction +description: + - Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser +version_added: 1.99 +notes: + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. +options: + + mailbox_name: + description: + - The name of the mailbox + required: true + default: null + + mailbox_password: + description: + - The password for the mailbox + required: true + default: null + + state: + description: + - Whether the mailbox should exist + required: false + choices: ['present', 'absent'] + default: "present" + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +import socket +import xmlrpclib +from ansible.module_utils.basic import * + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec=dict( + mailbox_name=dict(required=True, default=None), + mailbox_password=dict(required=True), + state=dict(required=False, default='present'), + login_name=dict(required=True), + login_password=dict(required=True), + ), + supports_check_mode=True + ) + + mailbox_name = module.params['mailbox_name'] + site_state = module.params['state'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + mailbox_list = webfaction.list_mailboxes(session_id) + existing_mailbox = mailbox_name in mailbox_list + + result = {} + + # Here's where the real stuff happens + + if site_state == 'present': + + # Does a mailbox with this name already exist? + if existing_mailbox: + module.exit_json(changed=False,) + + positional_args = [session_id, mailbox_name] + + if not module.check_mode: + # If this isn't a dry run, create the mailbox + result.update(webfaction.create_mailbox(*positional_args)) + + elif site_state == 'absent': + + # If the mailbox is already not there, nothing changed. 
+ if not existing_mailbox: + module.exit_json(changed=False) + + if not module.check_mode: + # If this isn't a dry run, delete the mailbox + result.update(webfaction.delete_mailbox(session_id, mailbox_name)) + + else: + module.fail_json(msg="Unknown state specified: {}".format(site_state)) + + module.exit_json(changed=True, result=result) + +# The conventional ending +main() + diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py new file mode 100644 index 00000000000..5db89355966 --- /dev/null +++ b/cloud/webfaction/webfaction_site.py @@ -0,0 +1,189 @@ +#! /usr/bin/python +# Create Webfaction website using Ansible and the Webfaction API +# +# Quentin Stafford-Fraser 2015 + +DOCUMENTATION = ''' +--- +module: webfaction_site +short_description: Add or remove a website on a Webfaction host +description: + - Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser +version_added: 1.99 +notes: + - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name. + - If a site of the same name exists in the account but on a different host, the operation will exit. + - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." + - See `the webfaction API `_ for more info. + +options: + + name: + description: + - The name of the website + required: true + default: null + + state: + description: + - Whether the website should exist + required: false + choices: ['present', 'absent'] + default: "present" + + host: + description: + - The webfaction host on which the site should be created. + required: true + + https: + description: + - Whether or not to use HTTPS + required: false + choices: BOOLEANS + default: 'false' + + site_apps: + description: + - A mapping of URLs to apps + required: false + + subdomains: + description: + - A list of subdomains associated with this site. + required: false + default: null + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +EXAMPLES = ''' + - name: create website + webfaction_site: + name: testsite1 + state: present + host: myhost.webfaction.com + subdomains: + - 'testsite1.my_domain.org' + site_apps: + - ['testapp1', '/'] + https: no + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" +''' + +import socket +import xmlrpclib +from ansible.module_utils.basic import * + +webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + state = dict(required=False, default='present'), + # You can specify an IP address or hostname. 
+ host = dict(required=True, default=None), + https = dict(required=False, choices=BOOLEANS, default='false'), + subdomains = dict(required=False, default=[]), + site_apps = dict(required=False, default=[]), + login_name = dict(required=True), + login_password = dict(required=True), + ), + supports_check_mode=True + ) + site_name = module.params['name'] + site_state = module.params['state'] + site_host = module.params['host'] + site_ip = socket.gethostbyname(site_host) + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + site_list = webfaction.list_websites(session_id) + site_map = dict([(i['name'], i) for i in site_list]) + existing_site = site_map.get(site_name) + + result = {} + + # Here's where the real stuff happens + + if site_state == 'present': + + # Does a site with this name already exist? + if existing_site: + + # If yes, but it's on a different IP address, then fail. + # If we wanted to allow relocation, we could add a 'relocate=true' option + # which would get the existing IP address, delete the site there, and create it + # at the new address. A bit dangerous, perhaps, so for now we'll require manual + # deletion if it's on another host. + + if existing_site['ip'] != site_ip: + module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.") + + # If it's on this host and the key parameters are the same, nothing needs to be done. + + if (existing_site['https'] == module.boolean(module.params['https'])) and \ + (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \ + (dict(existing_site['website_apps']) == dict(module.params['site_apps'])): + module.exit_json( + changed = False + ) + + positional_args = [ + session_id, site_name, site_ip, + module.boolean(module.params['https']), + module.params['subdomains'], + ] + for a in module.params['site_apps']: + positional_args.append( (a[0], a[1]) ) + + if not module.check_mode: + # If this isn't a dry run, create or modify the site + result.update( + webfaction.create_website( + *positional_args + ) if not existing_site else webfaction.update_website ( + *positional_args + ) + ) + + elif site_state == 'absent': + + # If the site's already not there, nothing changed. + if not existing_site: + module.exit_json( + changed = False, + ) + + if not module.check_mode: + # If this isn't a dry run, delete the site + result.update( + webfaction.delete_website(session_id, site_name, site_ip) + ) + + else: + module.fail_json(msg="Unknown state specified: {}".format(site_state)) + + module.exit_json( + changed = True, + result = result + ) + +# The conventional ending +main() + From d524d450aef07cc3a828e5f1887dda8049855d1a Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Sun, 3 May 2015 23:48:51 +0100 Subject: [PATCH 578/720] Tidying of webfaction modules --- cloud/webfaction/webfaction_app.py | 12 +++++------- cloud/webfaction/webfaction_db.py | 10 ++++------ cloud/webfaction/webfaction_domain.py | 8 +++----- cloud/webfaction/webfaction_mailbox.py | 9 ++++----- cloud/webfaction/webfaction_site.py | 14 +++++++------- 5 files changed, 23 insertions(+), 30 deletions(-) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index b1ddcd5a9c0..08a0205eb87 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -13,7 +13,7 @@ short_description: Add or remove applications on a Webfaction host description: - Add or remove applications on a Webfaction host. 
Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 1.99 +version_added: 2.0 notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API `_ for more info. @@ -23,7 +23,6 @@ options: description: - The name of the application required: true - default: null state: description: @@ -65,7 +64,6 @@ options: ''' import xmlrpclib -from ansible.module_utils.basic import * webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') @@ -73,12 +71,12 @@ def main(): module = AnsibleModule( argument_spec = dict( - name = dict(required=True, default=None), + name = dict(required=True), state = dict(required=False, default='present'), type = dict(required=True), - autostart = dict(required=False, choices=BOOLEANS, default='false'), + autostart = dict(required=False, choices=BOOLEANS, default=False), extra_info = dict(required=False, default=""), - port_open = dict(required=False, default="false"), + port_open = dict(required=False, choices=BOOLEANS, default=False), login_name = dict(required=True), login_password = dict(required=True), ), @@ -148,6 +146,6 @@ def main(): result = result ) -# The conventional ending +from ansible.module_utils.basic import * main() diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index 7205a084ef2..479540abc5c 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -10,7 +10,7 @@ short_description: Add or remove a database on Webfaction description: - Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 1.99 +version_added: 2.0 notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API `_ for more info. @@ -20,7 +20,6 @@ options: description: - The name of the database required: true - default: null state: description: @@ -61,7 +60,6 @@ EXAMPLES = ''' import socket import xmlrpclib -from ansible.module_utils.basic import * webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') @@ -69,10 +67,10 @@ def main(): module = AnsibleModule( argument_spec = dict( - name = dict(required=True, default=None), + name = dict(required=True), state = dict(required=False, default='present'), # You can specify an IP address or hostname. 
- type = dict(required=True, default=None), + type = dict(required=True), password = dict(required=False, default=None), login_name = dict(required=True), login_password = dict(required=True), @@ -142,6 +140,6 @@ def main(): result = result ) -# The conventional ending +from ansible.module_utils.basic import * main() diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index 2f3c8542754..a9e2b7dd9bb 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -10,7 +10,7 @@ short_description: Add or remove domains and subdomains on Webfaction description: - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 1.99 +version_added: 2.0 notes: - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted. - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." @@ -22,7 +22,6 @@ options: description: - The name of the domain required: true - default: null state: description: @@ -50,7 +49,6 @@ options: import socket import xmlrpclib -from ansible.module_utils.basic import * webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') @@ -58,7 +56,7 @@ def main(): module = AnsibleModule( argument_spec = dict( - name = dict(required=True, default=None), + name = dict(required=True), state = dict(required=False, default='present'), subdomains = dict(required=False, default=[]), login_name = dict(required=True), @@ -129,6 +127,6 @@ def main(): result = result ) -# The conventional ending +from ansible.module_utils.basic import * main() diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index 3ac848d6a94..1ba571a1dd1 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -10,7 +10,7 @@ short_description: Add or remove mailboxes on Webfaction description: - Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 1.99 +version_added: 2.0 notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API `_ for more info. 
@@ -20,7 +20,6 @@ options: description: - The name of the mailbox required: true - default: null mailbox_password: description: @@ -48,7 +47,6 @@ options: import socket import xmlrpclib -from ansible.module_utils.basic import * webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') @@ -56,7 +54,7 @@ def main(): module = AnsibleModule( argument_spec=dict( - mailbox_name=dict(required=True, default=None), + mailbox_name=dict(required=True), mailbox_password=dict(required=True), state=dict(required=False, default='present'), login_name=dict(required=True), @@ -107,6 +105,7 @@ def main(): module.exit_json(changed=True, result=result) -# The conventional ending + +from ansible.module_utils.basic import * main() diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py index 5db89355966..575e6eec996 100644 --- a/cloud/webfaction/webfaction_site.py +++ b/cloud/webfaction/webfaction_site.py @@ -10,7 +10,7 @@ short_description: Add or remove a website on a Webfaction host description: - Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 1.99 +version_added: 2.0 notes: - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name. - If a site of the same name exists in the account but on a different host, the operation will exit. @@ -23,7 +23,6 @@ options: description: - The name of the website required: true - default: null state: description: @@ -83,7 +82,6 @@ EXAMPLES = ''' import socket import xmlrpclib -from ansible.module_utils.basic import * webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') @@ -91,11 +89,11 @@ def main(): module = AnsibleModule( argument_spec = dict( - name = dict(required=True, default=None), + name = dict(required=True), state = dict(required=False, default='present'), # You can specify an IP address or hostname. 
- host = dict(required=True, default=None), - https = dict(required=False, choices=BOOLEANS, default='false'), + host = dict(required=True), + https = dict(required=False, choices=BOOLEANS, default=False), subdomains = dict(required=False, default=[]), site_apps = dict(required=False, default=[]), login_name = dict(required=True), @@ -184,6 +182,8 @@ def main(): result = result ) -# The conventional ending + + +from ansible.module_utils.basic import * main() From 4a2e5e4a653c783a10535b173e5293e840744364 Mon Sep 17 00:00:00 2001 From: fdupoux Date: Sat, 9 May 2015 14:06:58 +0100 Subject: [PATCH 579/720] Suppress prompts from lvcreate using --yes when LVM supports this option --- system/lvol.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index 7ec5ec5cd64..43511ae7b7a 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -85,6 +85,8 @@ import re decimal_point = re.compile(r"(\.|,)") +def mkversion(major, minor, patch): + return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch) def parse_lvs(data): lvs = [] @@ -97,6 +99,17 @@ def parse_lvs(data): return lvs +def get_lvm_version(module): + ver_cmd = module.get_bin_path("lvm", required=True) + rc, out, err = module.run_command("%s version" % (ver_cmd)) + if rc != 0: + return None + m = re.search("LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out) + if not m: + return None + return mkversion(m.group(1), m.group(2), m.group(3)) + + def main(): module = AnsibleModule( argument_spec=dict( @@ -109,6 +122,13 @@ def main(): supports_check_mode=True, ) + # Determine if the "--yes" option should be used + version_found = get_lvm_version(module) + if version_found == None: + module.fail_json(msg="Failed to get LVM version number") + version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option + yesopt = "--yes" if version_found >= version_yesopt else "" + vg = module.params['vg'] lv = module.params['lv'] size = module.params['size'] @@ -189,7 +209,7 @@ def main(): changed = True else: lvcreate_cmd = module.get_bin_path("lvcreate", required=True) - rc, _, err = module.run_command("%s -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) + rc, _, err = module.run_command("%s %s -n %s -%s %s%s %s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, vg)) if rc == 0: changed = True else: From 70983f397698af1b90164ddd8940edd6c38b79c6 Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Sun, 10 May 2015 20:40:50 +0100 Subject: [PATCH 580/720] Documentation version_added numbers are strings. --- cloud/webfaction/webfaction_app.py | 2 +- cloud/webfaction/webfaction_db.py | 2 +- cloud/webfaction/webfaction_domain.py | 2 +- cloud/webfaction/webfaction_mailbox.py | 2 +- cloud/webfaction/webfaction_site.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index 08a0205eb87..dec5f8e5d5e 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -13,7 +13,7 @@ short_description: Add or remove applications on a Webfaction host description: - Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. 
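The lvol change above gates the new `--yes` flag on the installed LVM version by packing major/minor/patch into one comparable integer. A standalone sketch of that gate, run against a made-up sample of `lvm version` output (the sample string is an assumption; the regex and encoding mirror the patch):

```python
import re

def mkversion(major, minor, patch):
    # Pack a three-part version into a single comparable integer, as in the patch.
    return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch)

# Hypothetical `lvm version` output; real output is similar but varies by distro.
sample = "  LVM version:     2.02.105(2) (2014-01-20)"

m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", sample)
version_found = mkversion(m.group(1), m.group(2), m.group(3)) if m else None

version_yesopt = mkversion(2, 2, 99)  # first LVM release with the --yes option
yesopt = "--yes" if version_found and version_found >= version_yesopt else ""
print(version_found, yesopt)  # 2002105 --yes
```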
author: Quentin Stafford-Fraser -version_added: 2.0 +version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API `_ for more info. diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index 479540abc5c..fc522439591 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -10,7 +10,7 @@ short_description: Add or remove a database on Webfaction description: - Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 2.0 +version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API `_ for more info. diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index a9e2b7dd9bb..31339014e6c 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -10,7 +10,7 @@ short_description: Add or remove domains and subdomains on Webfaction description: - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 2.0 +version_added: "2.0" notes: - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted. - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index 1ba571a1dd1..5eb82df3eaa 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -10,7 +10,7 @@ short_description: Add or remove mailboxes on Webfaction description: - Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 2.0 +version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API `_ for more info. 
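The commit subject ("Documentation version_added numbers are strings") comes down to YAML typing: an unquoted `2.0` loads as a float, while a quoted `"2.0"` stays a string. A quick demonstration, assuming PyYAML is available:

```python
# Shows the YAML typing behind quoting version_added; assumes PyYAML is installed.
import yaml

print(yaml.safe_load("version_added: 2.0"))     # {'version_added': 2.0}   -> float
print(yaml.safe_load('version_added: "2.0"'))   # {'version_added': '2.0'} -> str
print(type(yaml.safe_load("version_added: 2.0")["version_added"]))    # <class 'float'>
print(type(yaml.safe_load('version_added: "2.0"')["version_added"]))  # <class 'str'>
```

A float also collapses trailing zeros (an unquoted 2.10 loads as 2.1), which is another reason version identifiers are kept as quoted strings.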
diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py index 575e6eec996..c981a21fc2b 100644 --- a/cloud/webfaction/webfaction_site.py +++ b/cloud/webfaction/webfaction_site.py @@ -10,7 +10,7 @@ short_description: Add or remove a website on a Webfaction host description: - Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser -version_added: 2.0 +version_added: "2.0" notes: - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name. - If a site of the same name exists in the account but on a different host, the operation will exit. From 1a19b96464396ac77444b152c59a8c37372d8a7e Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Sun, 10 May 2015 20:47:31 +0100 Subject: [PATCH 581/720] Available choices for 'state' explicitly listed. --- cloud/webfaction/webfaction_app.py | 2 +- cloud/webfaction/webfaction_db.py | 2 +- cloud/webfaction/webfaction_domain.py | 2 +- cloud/webfaction/webfaction_mailbox.py | 2 +- cloud/webfaction/webfaction_site.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index dec5f8e5d5e..05b31f55a4a 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -72,7 +72,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(required=False, default='present'), + state = dict(required=False, choices=['present', 'absent'], default='present'), type = dict(required=True), autostart = dict(required=False, choices=BOOLEANS, default=False), extra_info = dict(required=False, default=""), diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index fc522439591..784477c5409 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -68,7 +68,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(required=False, default='present'), + state = dict(required=False, choices=['present', 'absent'], default='present'), # You can specify an IP address or hostname. 
type = dict(required=True), password = dict(required=False, default=None), diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index 31339014e6c..8548c4fba37 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -57,7 +57,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(required=False, default='present'), + state = dict(required=False, choices=['present', 'absent'], default='present'), subdomains = dict(required=False, default=[]), login_name = dict(required=True), login_password = dict(required=True), diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index 5eb82df3eaa..fee5700e50e 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -56,7 +56,7 @@ def main(): argument_spec=dict( mailbox_name=dict(required=True), mailbox_password=dict(required=True), - state=dict(required=False, default='present'), + state=dict(required=False, choices=['present', 'absent'], default='present'), login_name=dict(required=True), login_password=dict(required=True), ), diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py index c981a21fc2b..a5be4f5407b 100644 --- a/cloud/webfaction/webfaction_site.py +++ b/cloud/webfaction/webfaction_site.py @@ -90,7 +90,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(required=False, default='present'), + state = dict(required=False, choices=['present', 'absent'], default='present'), # You can specify an IP address or hostname. host = dict(required=True), https = dict(required=False, choices=BOOLEANS, default=False), From 25acd524e789211ce63308da6c99a32220f21303 Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Sun, 10 May 2015 22:07:49 +0100 Subject: [PATCH 582/720] Add examples. 
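Listing the choices for `state` explicitly means an invalid value fails fast at parameter validation instead of silently being treated as "anything other than absent". A rough, simplified stand-in for the kind of check AnsibleModule performs (illustration only, not the actual implementation):

```python
# Simplified sketch of choices validation; names and message format are assumptions.
def validate_choices(name, value, choices):
    if value not in choices:
        raise ValueError(
            "value of %s must be one of: %s, got: %s" % (name, ", ".join(choices), value)
        )
    return value

validate_choices("state", "present", ["present", "absent"])      # passes
try:
    validate_choices("state", "presnet", ["present", "absent"])  # typo is now caught
except ValueError as exc:
    print(exc)
```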
--- cloud/webfaction/webfaction_app.py | 10 ++++++++++ cloud/webfaction/webfaction_domain.py | 20 ++++++++++++++++++++ cloud/webfaction/webfaction_mailbox.py | 10 ++++++++++ 3 files changed, 40 insertions(+) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index 05b31f55a4a..20e94a7b5f6 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -63,6 +63,16 @@ options: required: true ''' +EXAMPLES = ''' + - name: Create a test app + webfaction_app: + name="my_wsgi_app1" + state=present + type=mod_wsgi35-python27 + login_name={{webfaction_user}} + login_password={{webfaction_passwd}} +''' + import xmlrpclib webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index 8548c4fba37..c99a0f23f6d 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -47,6 +47,26 @@ options: required: true ''' +EXAMPLES = ''' + - name: Create a test domain + webfaction_domain: + name: mydomain.com + state: present + subdomains: + - www + - blog + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" + + - name: Delete test domain and any subdomains + webfaction_domain: + name: mydomain.com + state: absent + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" + +''' + import socket import xmlrpclib diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index fee5700e50e..87ca1fd1a26 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -45,6 +45,16 @@ options: required: true ''' +EXAMPLES = ''' + - name: Create a mailbox + webfaction_mailbox: + mailbox_name="mybox" + mailbox_password="myboxpw" + state=present + login_name={{webfaction_user}} + login_password={{webfaction_passwd}} +''' + import socket import xmlrpclib From 2bcb0d4c080abf94bd7e06b61f5ea5a38a7da58a Mon Sep 17 00:00:00 2001 From: Lorenzo Luconi Trombacchi Date: Tue, 12 May 2015 10:56:22 +0200 Subject: [PATCH 583/720] added lower function for statuses --- monitoring/monit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/monit.py b/monitoring/monit.py index e87d8edca5a..4ad202993fe 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -77,7 +77,7 @@ def main(): # Process 'name' Running - restart pending parts = line.split() if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name: - return ' '.join(parts[2:]) + return ' '.join(parts[2:]).lower() else: return '' From 16db10958b0a163118da3fea544881846162a6c6 Mon Sep 17 00:00:00 2001 From: Lorenzo Luconi Trombacchi Date: Tue, 12 May 2015 10:58:47 +0200 Subject: [PATCH 584/720] fix a problem with status detection after unmonitor command --- monitoring/monit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/monit.py b/monitoring/monit.py index 4ad202993fe..6afb95d093d 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -119,7 +119,7 @@ def main(): if module.check_mode: module.exit_json(changed=True) status = run_command('unmonitor') - if status in ['not monitored']: + if status in ['not monitored'] or 'unmonitor pending' in status: module.exit_json(changed=True, name=name, state=state) module.fail_json(msg='%s process not unmonitored' % name, status=status) From 51b11fd1af70f335282bfa30450520a815965981 Mon Sep 17 00:00:00 2001 From: Lorenzo Luconi Trombacchi Date: 
Tue, 12 May 2015 11:07:52 +0200 Subject: [PATCH 585/720] status function was called twice --- monitoring/monit.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/monitoring/monit.py b/monitoring/monit.py index 6afb95d093d..6410ce815e8 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -86,7 +86,8 @@ def main(): module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True) return status() - present = status() != '' + process_status = status() + present = process_status != '' if not present and not state == 'present': module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state) @@ -102,7 +103,7 @@ def main(): module.exit_json(changed=True, name=name, state=state) module.exit_json(changed=False, name=name, state=state) - running = 'running' in status() + running = 'running' in process_status if running and state in ['started', 'monitored']: module.exit_json(changed=False, name=name, state=state) From 3b44082dd67de15d6c27cfba1836f04e83914797 Mon Sep 17 00:00:00 2001 From: Chris Long Date: Tue, 12 May 2015 22:10:53 +1000 Subject: [PATCH 586/720] Initial commit of nmcli: NetworkManager module. Currently supports: Create, modify, remove of - team, team-slave, bond, bond-slave, ethernet TODO: vlan, bridge, wireless related connections. --- network/nmcli.py | 1089 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1089 insertions(+) create mode 100644 network/nmcli.py diff --git a/network/nmcli.py b/network/nmcli.py new file mode 100644 index 00000000000..0532058da3b --- /dev/null +++ b/network/nmcli.py @@ -0,0 +1,1089 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Chris Long +# +# This file is a module for Ansible that interacts with Network Manager +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION=''' +--- +module: nmcli +author: Chris Long +short_description: Manage Networking +requirements: [ nmcli, dbus ] +description: + - Manage the network devices. Create, modify, and manage, ethernet, teams, bonds, vlans etc. +options: + state: + required: True + default: "present" + choices: [ present, absent ] + description: + - Whether the device should exist or not, taking action if the state is different from what is stated. + enabled: + required: False + default: "yes" + choices: [ "yes", "no" ] + description: + - Whether the service should start on boot. B(At least one of state and enabled are required.) + - Whether the connection profile can be automatically activated ( default: yes) + action: + required: False + default: None + choices: [ add, modify, show, up, down ] + description: + - Set to 'add' if you want to add a connection. + - Set to 'modify' if you want to modify a connection. Modify one or more properties in the connection profile. + - Set to 'delete' if you want to delete a connection. Delete a configured connection. The connection to be deleted is identified by its name 'cfname'. 
+ - Set to 'show' if you want to show a connection. Will show all devices unless 'cfname' is set. + - Set to 'up' if you want to bring a connection up. Requires 'cfname' to be set. + - Set to 'down' if you want to bring a connection down. Requires 'cfname' to be set. + cname: + required: True + default: None + description: + - Where CNAME will be the name used to call the connection. when not provided a default name is generated: [-][-] + ifname: + required: False + default: cname + description: + - Where INAME will be the what we call the interface name. Required with 'up', 'down' modifiers. + - interface to bind the connection to. The connection will only be applicable to this interface name. + - A special value of "*" can be used for interface-independent connections. + - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan. + type: + required: False + choices: [ ethernet, team, team-slave, bond, bond-slave, bridge, vlan ] + description: + - This is the type of device or network connection that you wish to create. + mode: + required: False + choices: [ "balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb" ] + default: None + description: + - This is the type of device or network connection that you wish to create for a bond, team or bridge. (NetworkManager default: balance-rr) + master: + required: False + default: None + description: + - master ] STP forwarding delay, in seconds (NetworkManager default: 15) + hellotime: + required: False + default: None + description: + - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds (NetworkManager default: 2) + maxage: + required: False + default: None + description: + - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds (NetworkManager default: 20) + ageingtime: + required: False + default: None + description: + - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds (NetworkManager default: 300) + mac: + required: False + default: None + description: + - This is only used with bridge - MAC address of the bridge (note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel) + slavepriority: + required: False + default: None + description: + - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave (default: 32) + path_cost: + required: False + default: None + description: + - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave (NetworkManager default: 100) + hairpin: + required: False + default: None + description: + - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame was received on. 
(NetworkManager default: yes) + vlanid: + required: False + default: None + description: + - This is only used with VLAN - VLAN ID in range <0-4095> + vlandev: + required: False + default: None + description: + - This is only used with VLAN - parent device this VLAN is on, can use ifname + flags: + required: False + default: None + description: + - This is only used with VLAN - flags + ingress: + required: False + default: None + description: + - This is only used with VLAN - VLAN ingress priority mapping + egress: + required: False + default: None + description: + - This is only used with VLAN - VLAN egress priority mapping + +''' + +EXAMPLES=''' +The following examples are working examples that I have run in the field. I followed follow the structure: +``` +|_/inventory/cloud-hosts +| /group_vars/openstack-stage.yml +| /host_vars/controller-01.openstack.host.com +| /host_vars/controller-02.openstack.host.com +|_/playbook/library/nmcli.py +| /playbook-add.yml +| /playbook-del.yml +``` + +## inventory examples +### groups_vars +```yml +--- +#devops_os_define_network +storage_gw: "192.168.0.254" +external_gw: "10.10.0.254" +tenant_gw: "172.100.0.254" + +#Team vars +nmcli_team: + - {cname: 'tenant', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"} + - {cname: 'external', ip4: "{{external_ip}}", gw4: "{{external_gw}}"} + - {cname: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"} +nmcli_team_slave: + - {cname: 'em1', ifname: 'em1', master: 'tenant'} + - {cname: 'em2', ifname: 'em2', master: 'tenant'} + - {cname: 'p2p1', ifname: 'p2p1', master: 'storage'} + - {cname: 'p2p2', ifname: 'p2p2', master: 'external'} + +#bond vars +nmcli_bond: + - {cname: 'tenant', ip4: "{{tenant_ip}}", gw4: '', mode: 'balance-rr'} + - {cname: 'external', ip4: "{{external_ip}}", gw4: '', mode: 'balance-rr'} + - {cname: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}", mode: 'balance-rr'} +nmcli_bond_slave: + - {cname: 'em1', ifname: 'em1', master: 'tenant'} + - {cname: 'em2', ifname: 'em2', master: 'tenant'} + - {cname: 'p2p1', ifname: 'p2p1', master: 'storage'} + - {cname: 'p2p2', ifname: 'p2p2', master: 'external'} + +#ethernet vars +nmcli_ethernet: + - {cname: 'em1', ifname: 'em1', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"} + - {cname: 'em2', ifname: 'em2', ip4: "{{tenant_ip1}}", gw4: "{{tenant_gw}}"} + - {cname: 'p2p1', ifname: 'p2p1', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"} + - {cname: 'p2p2', ifname: 'p2p2', ip4: "{{external_ip}}", gw4: "{{external_gw}}"} +``` + +### host_vars +```yml +--- +storage_ip: "192.168.160.21/23" +external_ip: "10.10.152.21/21" +tenant_ip: "192.168.200.21/23" +``` + + + +## playbook-add.yml example + +```yml +--- +- hosts: openstack-stage + remote_user: root + tasks: + +- name: install needed network manager libs + yum: name={{ item }} state=installed + with_items: + - libnm-qt-devel.x86_64 + - nm-connection-editor.x86_64 + - libsemanage-python + - policycoreutils-python + +##### Working with all cloud nodes - Teaming + - name: try nmcli add team - cname only & ip4 gw4 + nmcli: type=team cname={{item.cname}} ip4={{item.ip4}} gw4={{item.gw4}} state=present + with_items: + - "{{nmcli_team}}" + + - name: try nmcli add teams-slave + nmcli: type=team-slave cname={{item.cname}} ifname={{item.ifname}} master={{item.master}} state=present + with_items: + - "{{nmcli_team_slave}}" + +###### Working with all cloud nodes - Bonding +# - name: try nmcli add bond - cname only & ip4 gw4 mode +# nmcli: type=bond cname={{item.cname}} ip4={{item.ip4}} gw4={{item.gw4}} mode={{item.mode}} 
state=present +# with_items: +# - "{{nmcli_bond}}" +# +# - name: try nmcli add bond-slave +# nmcli: type=bond-slave cname={{item.cname}} ifname={{item.ifname}} master={{item.master}} state=present +# with_items: +# - "{{nmcli_bond_slave}}" + +##### Working with all cloud nodes - Ethernet +# - name: nmcli add Ethernet - cname only & ip4 gw4 +# nmcli: type=ethernet cname={{item.cname}} ip4={{item.ip4}} gw4={{item.gw4}} state=present +# with_items: +# - "{{nmcli_ethernet}}" +``` + +## playbook-del.yml example + +```yml +--- +- hosts: openstack-stage + remote_user: root + tasks: + + - name: try nmcli del team - multiple + nmcli: cname={{item.cname}} state=absent + with_items: + - { cname: 'em1'} + - { cname: 'em2'} + - { cname: 'p1p1'} + - { cname: 'p1p2'} + - { cname: 'p2p1'} + - { cname: 'p2p2'} + - { cname: 'tenant'} + - { cname: 'storage'} + - { cname: 'external'} + - { cname: 'team-em1'} + - { cname: 'team-em2'} + - { cname: 'team-p1p1'} + - { cname: 'team-p1p2'} + - { cname: 'team-p2p1'} + - { cname: 'team-p2p2'} +``` +# To add an Ethernet connection with static IP configuration, issue a command as follows +- nmcli: cname=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present + +# To add an Team connection with static IP configuration, issue a command as follows +- nmcli: cname=my-team1 ifname=my-team1 type=team ip4=192.168.100.100/24 gw4=192.168.100.1 state=present enabled=yes + +# Optionally, at the same time specify IPv6 addresses for the device as follows: +- nmcli: cname=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 ip6=abbe::cafe gw6=2001:db8::1 state=present + +# To add two IPv4 DNS server addresses: +-nmcli: cname=my-eth1 dns4=["8.8.8.8", "8.8.4.4"] state=present + +# To make a profile usable for all compatible Ethernet interfaces, issue a command as follows +- nmcli: ctype=ethernet name=my-eth1 ifname="*" state=present + +# To change the property of a setting e.g. MTU, issue a command as follows: +- nmcli: cname=my-eth1 mtu=9000 state=present + + Exit Status's: + - nmcli exits with status 0 if it succeeds, a value greater than 0 is + returned if an error occurs. + - 0 Success - indicates the operation succeeded + - 1 Unknown or unspecified error + - 2 Invalid user input, wrong nmcli invocation + - 3 Timeout expired (see --wait option) + - 4 Connection activation failed + - 5 Connection deactivation failed + - 6 Disconnecting device failed + - 7 Connection deletion failed + - 8 NetworkManager is not running + - 9 nmcli and NetworkManager versions mismatch + - 10 Connection, device, or access point does not exist. +''' +# import ansible.module_utils.basic +import os +import syslog +import sys +import dbus +from gi.repository import NetworkManager, NMClient + + +class Nmcli(object): + """ + This is the generic nmcli manipulation class that is subclassed based on platform. + A subclass may wish to override the following action methods:- + - create_connection() + - delete_connection() + - modify_connection() + - show_connection() + - up_connection() + - down_connection() + All subclasses MUST define platform and distribution (which may be None). 
+ """ + + platform='Generic' + distribution=None + bus=dbus.SystemBus() + # The following is going to be used in dbus code + DEVTYPES={1: "Ethernet", + 2: "Wi-Fi", + 5: "Bluetooth", + 6: "OLPC", + 7: "WiMAX", + 8: "Modem", + 9: "InfiniBand", + 10: "Bond", + 11: "VLAN", + 12: "ADSL", + 13: "Bridge", + 14: "Generic", + 15: "Team" + } + STATES={0: "Unknown", + 10: "Unmanaged", + 20: "Unavailable", + 30: "Disconnected", + 40: "Prepare", + 50: "Config", + 60: "Need Auth", + 70: "IP Config", + 80: "IP Check", + 90: "Secondaries", + 100: "Activated", + 110: "Deactivating", + 120: "Failed" + } + + def __new__(cls, *args, **kwargs): + return load_platform_subclass(Nmcli, args, kwargs) + + def __init__(self, module): + self.module=module + self.state=module.params['state'] + self.enabled=module.params['enabled'] + self.action=module.params['action'] + self.cname=module.params['cname'] + self.master=module.params['master'] + self.autoconnect=module.params['autoconnect'] + self.ifname=module.params['ifname'] + self.type=module.params['type'] + self.ip4=module.params['ip4'] + self.gw4=module.params['gw4'] + self.dns4=module.params['dns4'] + self.ip6=module.params['ip6'] + self.gw6=module.params['gw6'] + self.dns6=module.params['dns6'] + self.mtu=module.params['mtu'] + self.stp=module.params['stp'] + self.priority=module.params['priority'] + self.mode=module.params['mode'] + self.miimon=module.params['miimon'] + self.downdelay=module.params['downdelay'] + self.updelay=module.params['updelay'] + self.arp_interval=module.params['arp_interval'] + self.arp_ip_target=module.params['arp_ip_target'] + self.slavepriority=module.params['slavepriority'] + self.forwarddelay=module.params['forwarddelay'] + self.hellotime=module.params['hellotime'] + self.maxage=module.params['maxage'] + self.ageingtime=module.params['ageingtime'] + self.mac=module.params['mac'] + self.vlanid=module.params['vlanid'] + self.vlandev=module.params['vlandev'] + self.flags=module.params['flags'] + self.ingress=module.params['ingress'] + self.egress=module.params['egress'] + # select whether we dump additional debug info through syslog + self.syslogging=True + + def execute_command(self, cmd, use_unsafe_shell=False, data=None): + if self.syslogging: + syslog.openlog('ansible-%s' % os.path.basename(__file__)) + syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) + + return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) + + def merge_secrets(self, proxy, config, setting_name): + try: + # returns a dict of dicts mapping name::setting, where setting is a dict + # mapping key::value. Each member of the 'setting' dict is a secret + secrets=proxy.GetSecrets(setting_name) + + # Copy the secrets into our connection config + for setting in secrets: + for key in secrets[setting]: + config[setting_name][key]=secrets[setting][key] + except Exception, e: + pass + + def dict_to_string(self, d): + # Try to trivially translate a dictionary's elements into nice string + # formatting. 
+ dstr="" + for key in d: + val=d[key] + str_val="" + add_string=True + if type(val)==type(dbus.Array([])): + for elt in val: + if type(elt)==type(dbus.Byte(1)): + str_val+="%s " % int(elt) + elif type(elt)==type(dbus.String("")): + str_val+="%s" % elt + elif type(val)==type(dbus.Dictionary({})): + dstr+=self.dict_to_string(val) + add_string=False + else: + str_val=val + if add_string: + dstr+="%s: %s\n" % ( key, str_val) + return dstr + + def connection_to_string(self, config): + # dump a connection configuration to use in list_connection_info + setting_list=[] + for setting_name in config: + setting_list.append(self.dict_to_string(config[setting_name])) + return setting_list + # print "" + + def list_connection_info(self): + # Ask the settings service for the list of connections it provides + bus=dbus.SystemBus() + + service_name="org.freedesktop.NetworkManager" + proxy=bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings") + settings=dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings") + connection_paths=settings.ListConnections() + connection_list=[] + # List each connection's name, UUID, and type + for path in connection_paths: + con_proxy=bus.get_object(service_name, path) + settings_connection=dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection") + config=settings_connection.GetSettings() + + # Now get secrets too; we grab the secrets for each type of connection + # (since there isn't a "get all secrets" call because most of the time + # you only need 'wifi' secrets or '802.1x' secrets, not everything) and + # merge that into the configuration data - To use at a later stage + self.merge_secrets(settings_connection, config, '802-11-wireless') + self.merge_secrets(settings_connection, config, '802-11-wireless-security') + self.merge_secrets(settings_connection, config, '802-1x') + self.merge_secrets(settings_connection, config, 'gsm') + self.merge_secrets(settings_connection, config, 'cdma') + self.merge_secrets(settings_connection, config, 'ppp') + + # Get the details of the 'connection' setting + s_con=config['connection'] + connection_list.append(s_con['id']) + connection_list.append(s_con['uuid']) + connection_list.append(s_con['type']) + connection_list.append(self.connection_to_string(config)) + return connection_list + + def connection_exists(self): + # we are going to use name and type in this instance to find if that connection exists and is of type x + connections=self.list_connection_info() + + for con_item in connections: + if self.cname==con_item: + return True + + def down_connection(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # if self.connection_exists(): + cmd.append('con') + cmd.append('down') + cmd.append(self.cname) + return self.execute_command(cmd) + + def up_connection(self): + cmd=[self.module.get_bin_path('nmcli', True)] + cmd.append('con') + cmd.append('up') + cmd.append(self.cname) + return self.execute_command(cmd) + + def create_connection_team(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating team interface + cmd.append('con') + cmd.append('add') + cmd.append('type') + cmd.append('team') + cmd.append('con-name') + if self.cname is not None: + cmd.append(self.cname) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.cname is not None: + cmd.append(self.cname) + if self.ip4 is not None: + cmd.append('ip4') + cmd.append(self.ip4) + if self.gw4 is not None: + 
cmd.append('gw4') + cmd.append(self.gw4) + if self.ip6 is not None: + cmd.append('ip6') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('gw6') + cmd.append(self.gw6) + if self.enabled is not None: + cmd.append('autoconnect') + cmd.append(self.enabled) + return cmd + + def modify_connection_team(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying team interface + cmd.append('con') + cmd.append('mod') + cmd.append(self.cname) + if self.ip4 is not None: + cmd.append('ipv4.address') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('ipv4.gateway') + cmd.append(self.gw4) + if self.dns4 is not None: + cmd.append('ipv4.dns') + cmd.append(self.dns4) + if self.ip6 is not None: + cmd.append('ipv6.address') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('ipv6.gateway') + cmd.append(self.gw4) + if self.dns6 is not None: + cmd.append('ipv6.dns') + cmd.append(self.dns6) + if self.enabled is not None: + cmd.append('autoconnect') + cmd.append(self.enabled) + # Can't use MTU with team + return cmd + + def create_connection_team_slave(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating team-slave interface + cmd.append('connection') + cmd.append('add') + cmd.append('type') + cmd.append(self.type) + cmd.append('con-name') + if self.cname is not None: + cmd.append(self.cname) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.cname is not None: + cmd.append(self.cname) + cmd.append('master') + if self.cname is not None: + cmd.append(self.master) + # if self.mtu is not None: + # cmd.append('802-3-ethernet.mtu') + # cmd.append(self.mtu) + return cmd + + def modify_connection_team_slave(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying team-slave interface + cmd.append('con') + cmd.append('mod') + cmd.append(self.cname) + cmd.append('connection.master') + cmd.append(self.master) + if self.mtu is not None: + cmd.append('802-3-ethernet.mtu') + cmd.append(self.mtu) + return cmd + + def create_connection_bond(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating bond interface + cmd.append('con') + cmd.append('add') + cmd.append('type') + cmd.append('bond') + cmd.append('con-name') + if self.cname is not None: + cmd.append(self.cname) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.cname is not None: + cmd.append(self.cname) + if self.ip4 is not None: + cmd.append('ip4') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('gw4') + cmd.append(self.gw4) + if self.ip6 is not None: + cmd.append('ip6') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('gw6') + cmd.append(self.gw6) + if self.enabled is not None: + cmd.append('autoconnect') + cmd.append(self.enabled) + if self.mode is not None: + cmd.append('mode') + cmd.append(self.mode) + if self.miimon is not None: + cmd.append('miimon') + cmd.append(self.miimon) + if self.downdelay is not None: + cmd.append('downdelay') + cmd.append(self.downdelay) + if self.downdelay is not None: + cmd.append('updelay') + cmd.append(self.updelay) + if self.downdelay is not None: + cmd.append('arp-interval') + cmd.append(self.arp_interval) + if self.downdelay is not None: + cmd.append('arp-ip-target') + cmd.append(self.arp_ip_target) + return cmd + + def modify_connection_bond(self): + 
cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying bond interface + cmd.append('con') + cmd.append('mod') + cmd.append(self.cname) + if self.ip4 is not None: + cmd.append('ipv4.address') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('ipv4.gateway') + cmd.append(self.gw4) + if self.dns4 is not None: + cmd.append('ipv4.dns') + cmd.append(self.dns4) + if self.ip6 is not None: + cmd.append('ipv6.address') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('ipv6.gateway') + cmd.append(self.gw4) + if self.dns6 is not None: + cmd.append('ipv6.dns') + cmd.append(self.dns6) + if self.enabled is not None: + cmd.append('autoconnect') + cmd.append(self.enabled) + return cmd + + def create_connection_bond_slave(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating bond-slave interface + cmd.append('connection') + cmd.append('add') + cmd.append('type') + cmd.append('bond-slave') + cmd.append('con-name') + if self.cname is not None: + cmd.append(self.cname) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.cname is not None: + cmd.append(self.cname) + cmd.append('master') + if self.cname is not None: + cmd.append(self.master) + return cmd + + def modify_connection_bond_slave(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying bond-slave interface + cmd.append('con') + cmd.append('mod') + cmd.append(self.cname) + cmd.append('connection.master') + cmd.append(self.master) + return cmd + + def create_connection_ethernet(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating ethernet interface + # To add an Ethernet connection with static IP configuration, issue a command as follows + # - nmcli: name=add cname=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present + # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1 + cmd.append('con') + cmd.append('add') + cmd.append('type') + cmd.append('ethernet') + cmd.append('con-name') + if self.cname is not None: + cmd.append(self.cname) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.cname is not None: + cmd.append(self.cname) + if self.ip4 is not None: + cmd.append('ip4') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('gw4') + cmd.append(self.gw4) + if self.ip6 is not None: + cmd.append('ip6') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('gw6') + cmd.append(self.gw6) + if self.enabled is not None: + cmd.append('autoconnect') + cmd.append(self.enabled) + return cmd + + def modify_connection_ethernet(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying ethernet interface + # To add an Ethernet connection with static IP configuration, issue a command as follows + # - nmcli: name=add cname=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present + # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1 + cmd.append('con') + cmd.append('mod') + cmd.append(self.cname) + if self.ip4 is not None: + cmd.append('ipv4.address') + cmd.append(self.ip4) + if self.gw4 is not None: + cmd.append('ipv4.gateway') + cmd.append(self.gw4) + if self.dns4 is not None: + cmd.append('ipv4.dns') + cmd.append(self.dns4) + if self.ip6 is not None: + 
cmd.append('ipv6.address') + cmd.append(self.ip6) + if self.gw6 is not None: + cmd.append('ipv6.gateway') + cmd.append(self.gw4) + if self.dns6 is not None: + cmd.append('ipv6.dns') + cmd.append(self.dns6) + if self.mtu is not None: + cmd.append('802-3-ethernet.mtu') + cmd.append(self.mtu) + if self.enabled is not None: + cmd.append('autoconnect') + cmd.append(self.enabled) + return cmd + + def create_connection_bridge(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating bridge interface + return cmd + + def modify_connection_bridge(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying bridge interface + return cmd + + def create_connection_vlan(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for creating ethernet interface + return cmd + + def modify_connection_vlan(self): + cmd=[self.module.get_bin_path('nmcli', True)] + # format for modifying ethernet interface + return cmd + + def create_connection(self): + cmd=[] + if self.type=='team': + # cmd=self.create_connection_team() + if (self.dns4 is not None) or (self.dns6 is not None): + cmd=self.create_connection_team() + self.execute_command(cmd) + cmd=self.modify_connection_team() + self.execute_command(cmd) + cmd=self.up_connection() + return self.execute_command(cmd) + elif (self.dns4 is None) or (self.dns6 is None): + cmd=self.create_connection_team() + return self.execute_command(cmd) + elif self.type=='team-slave': + if self.mtu is not None: + cmd=self.create_connection_team_slave() + self.execute_command(cmd) + cmd=self.modify_connection_team_slave() + self.execute_command(cmd) + # cmd=self.up_connection() + return self.execute_command(cmd) + else: + cmd=self.create_connection_team_slave() + return self.execute_command(cmd) + elif self.type=='bond': + if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): + cmd=self.create_connection_bond() + self.execute_command(cmd) + cmd=self.modify_connection_bond() + self.execute_command(cmd) + cmd=self.up_connection() + return self.execute_command(cmd) + else: + cmd=self.create_connection_bond() + return self.execute_command(cmd) + elif self.type=='bond-slave': + cmd=self.create_connection_bond_slave() + elif self.type=='ethernet': + if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): + cmd=self.create_connection_ethernet() + self.execute_command(cmd) + cmd=self.modify_connection_ethernet() + self.execute_command(cmd) + cmd=self.up_connection() + return self.execute_command(cmd) + else: + cmd=self.create_connection_ethernet() + return self.execute_command(cmd) + elif self.type=='bridge': + cmd=self.create_connection_bridge() + elif self.type=='vlan': + cmd=self.create_connection_vlan() + return self.execute_command(cmd) + + def remove_connection(self): + # self.down_connection() + cmd=[self.module.get_bin_path('nmcli', True)] + cmd.append('con') + cmd.append('del') + cmd.append(self.cname) + return self.execute_command(cmd) + + def modify_connection(self): + cmd=[] + if self.type=='team': + cmd=self.modify_connection_team() + elif self.type=='team-slave': + cmd=self.modify_connection_team_slave() + elif self.type=='bond': + cmd=self.modify_connection_bond() + elif self.type=='bond-slave': + cmd=self.modify_connection_bond_slave() + elif self.type=='ethernet': + cmd=self.modify_connection_ethernet() + elif self.type=='bridge': + cmd=self.modify_connection_bridge() + elif self.type=='vlan': + cmd=self.modify_connection_vlan() + return self.execute_command(cmd) + + +def 
main(): + # Parsing argument file + module=AnsibleModule( + argument_spec=dict( + enabled=dict(required=False, default=None, choices=['yes', 'no'], type='str'), + action=dict(required=False, default=None, choices=['add', 'mod', 'show', 'up', 'down', 'del'], type='str'), + state=dict(required=True, default=None, choices=['present', 'absent'], type='str'), + cname=dict(required=False, type='str'), + master=dict(required=False, default=None, type='str'), + autoconnect=dict(required=False, default=None, choices=['yes', 'no'], type='str'), + ifname=dict(required=False, default=None, type='str'), + type=dict(required=False, default=None, choices=['ethernet', 'team', 'team-slave', 'bond', 'bond-slave', 'bridge', 'vlan'], type='str'), + ip4=dict(required=False, default=None, type='str'), + gw4=dict(required=False, default=None, type='str'), + dns4=dict(required=False, default=None, type='str'), + ip6=dict(required=False, default=None, type='str'), + gw6=dict(required=False, default=None, type='str'), + dns6=dict(required=False, default=None, type='str'), + # Bond Specific vars + mode=dict(require=False, default="balance-rr", choices=["balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb"], type='str'), + miimon=dict(required=False, default=None, type='str'), + downdelay=dict(required=False, default=None, type='str'), + updelay=dict(required=False, default=None, type='str'), + arp_interval=dict(required=False, default=None, type='str'), + arp_ip_target=dict(required=False, default=None, type='str'), + # general usage + mtu=dict(required=False, default=None, type='str'), + mac=dict(required=False, default=None, type='str'), + # bridge specific vars + stp=dict(required=False, default='yes', choices=['yes', 'no'], type='str'), + priority=dict(required=False, default="128", type='str'), + slavepriority=dict(required=False, default="32", type='str'), + forwarddelay=dict(required=False, default="15", type='str'), + hellotime=dict(required=False, default="2", type='str'), + maxage=dict(required=False, default="20", type='str'), + ageingtime=dict(required=False, default="300", type='str'), + # vlan specific vars + vlanid=dict(required=False, default=None, type='str'), + vlandev=dict(required=False, default=None, type='str'), + flags=dict(required=False, default=None, type='str'), + ingress=dict(required=False, default=None, type='str'), + egress=dict(required=False, default=None, type='str'), + ), + supports_check_mode=True + ) + + nmcli=Nmcli(module) + + if nmcli.syslogging: + syslog.openlog('ansible-%s' % os.path.basename(__file__)) + syslog.syslog(syslog.LOG_NOTICE, 'Nmcli instantiated - platform %s' % nmcli.platform) + if nmcli.distribution: + syslog.syslog(syslog.LOG_NOTICE, 'Nuser instantiated - distribution %s' % nmcli.distribution) + + rc=None + out='' + err='' + result={} + result['cname']=nmcli.cname + result['state']=nmcli.state + + # check for issues + if nmcli.cname is None: + nmcli.module.fail_json(msg="You haven't specified a name for the connection") + # team-slave checks + if nmcli.type=='team-slave' and nmcli.master is None: + nmcli.module.fail_json(msg="You haven't specified a name for the master so we're not changing a thing") + if nmcli.type=='team-slave' and nmcli.ifname is None: + nmcli.module.fail_json(msg="You haven't specified a name for the connection") + + if nmcli.state=='absent': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err)=nmcli.down_connection() + (rc, out, 
err)=nmcli.remove_connection() + if rc!=0: + module.fail_json(name =('No Connection named %s exists' % nmcli.cname), msg=err, rc=rc) + + elif nmcli.state=='present': + if nmcli.connection_exists(): + # modify connection (note: this function is check mode aware) + # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.cname, nmcli.type)) + result['Exists']='Connections do exist so we are modifying them' + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err)=nmcli.modify_connection() + if not nmcli.connection_exists(): + result['Connection']=('Connection %s of Type %s is being added' % (nmcli.cname, nmcli.type)) + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err)=nmcli.create_connection() + if rc is not None and rc!=0: + module.fail_json(name=nmcli.cname, msg=err, rc=rc) + + if rc is None: + result['changed']=False + else: + result['changed']=True + if out: + result['stdout']=out + if err: + result['stderr']=err + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * + +main() \ No newline at end of file From 2856116162aa7430368c395ec099888cb1ea7b7b Mon Sep 17 00:00:00 2001 From: Chris Long Date: Fri, 15 May 2015 00:45:51 +1000 Subject: [PATCH 587/720] Updated as per bcoca's comments: removed 'default' in state: removed defunct action: removed reference to load_platform_subclass changed cname to conn_name --- network/nmcli.py | 202 ++++++++++++++++++++++------------------------- 1 file changed, 93 insertions(+), 109 deletions(-) diff --git a/network/nmcli.py b/network/nmcli.py index 0532058da3b..55edb322ad7 100644 --- a/network/nmcli.py +++ b/network/nmcli.py @@ -30,7 +30,6 @@ description: options: state: required: True - default: "present" choices: [ present, absent ] description: - Whether the device should exist or not, taking action if the state is different from what is stated. @@ -41,25 +40,14 @@ options: description: - Whether the service should start on boot. B(At least one of state and enabled are required.) - Whether the connection profile can be automatically activated ( default: yes) - action: - required: False - default: None - choices: [ add, modify, show, up, down ] - description: - - Set to 'add' if you want to add a connection. - - Set to 'modify' if you want to modify a connection. Modify one or more properties in the connection profile. - - Set to 'delete' if you want to delete a connection. Delete a configured connection. The connection to be deleted is identified by its name 'cfname'. - - Set to 'show' if you want to show a connection. Will show all devices unless 'cfname' is set. - - Set to 'up' if you want to bring a connection up. Requires 'cfname' to be set. - - Set to 'down' if you want to bring a connection down. Requires 'cfname' to be set. - cname: + conn_name: required: True default: None description: - - Where CNAME will be the name used to call the connection. when not provided a default name is generated: [-][-] + - Where conn_name will be the name used to call the connection. when not provided a default name is generated: [-][-] ifname: required: False - default: cname + default: conn_name description: - Where INAME will be the what we call the interface name. Required with 'up', 'down' modifiers. - interface to bind the connection to. The connection will only be applicable to this interface name. 
@@ -80,7 +68,7 @@ options: required: False default: None description: - - master Date: Fri, 15 May 2015 01:09:49 +1000 Subject: [PATCH 588/720] Fixed descriptions to all be lists replaced enabled with autoconnect - refactored code to reflect update. removed ansible syslog entry. --- network/nmcli.py | 66 +++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 35 deletions(-) diff --git a/network/nmcli.py b/network/nmcli.py index 55edb322ad7..18f0ecbab1f 100644 --- a/network/nmcli.py +++ b/network/nmcli.py @@ -31,25 +31,24 @@ options: state: required: True choices: [ present, absent ] - description: - - Whether the device should exist or not, taking action if the state is different from what is stated. - enabled: + description: + - Whether the device should exist or not, taking action if the state is different from what is stated. + autoconnect: required: False default: "yes" choices: [ "yes", "no" ] description: - - Whether the service should start on boot. B(At least one of state and enabled are required.) + - Whether the connection should start on boot. - Whether the connection profile can be automatically activated ( default: yes) conn_name: required: True - default: None description: - Where conn_name will be the name used to call the connection. when not provided a default name is generated: [-][-] ifname: required: False default: conn_name description: - - Where INAME will be the what we call the interface name. Required with 'up', 'down' modifiers. + - Where IFNAME will be the what we call the interface name. - interface to bind the connection to. The connection will only be applicable to this interface name. - A special value of "*" can be used for interface-independent connections. - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan. @@ -72,14 +71,17 @@ options: ip4: required: False default: None - description: The IPv4 address to this interface using this format ie: "192.168.1.24/24" + description: + - The IPv4 address to this interface using this format ie: "192.168.1.24/24" gw4: required: False - description: The IPv4 gateway for this interface using this format ie: "192.168.100.1" + description: + - The IPv4 gateway for this interface using this format ie: "192.168.100.1" dns4: required: False default: None - description: A list of upto 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ['"8.8.8.8 8.8.4.4"'] + description: + - A list of upto 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ['"8.8.8.8 8.8.4.4"'] ip6: required: False default: None @@ -88,10 +90,12 @@ options: gw6: required: False default: None - description: The IPv6 gateway for this interface using this format ie: "2001:db8::1" + description: + - The IPv6 gateway for this interface using this format ie: "2001:db8::1" dns6: required: False - description: A list of upto 3 dns servers, ipv6 format e.g. To add two IPv6 DNS server addresses: ['"2001:4860:4860::8888 2001:4860:4860::8844"'] + description: + - A list of upto 3 dns servers, ipv6 format e.g. 
To add two IPv6 DNS server addresses: ['"2001:4860:4860::8888 2001:4860:4860::8844"'] mtu: required: False default: None @@ -343,7 +347,7 @@ tenant_ip: "192.168.200.21/23" - nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present # To add an Team connection with static IP configuration, issue a command as follows -- nmcli: conn_name=my-team1 ifname=my-team1 type=team ip4=192.168.100.100/24 gw4=192.168.100.1 state=present enabled=yes +- nmcli: conn_name=my-team1 ifname=my-team1 type=team ip4=192.168.100.100/24 gw4=192.168.100.1 state=present autoconnect=yes # Optionally, at the same time specify IPv6 addresses for the device as follows: - nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 ip6=abbe::cafe gw6=2001:db8::1 state=present @@ -430,10 +434,9 @@ class Nmcli(object): def __init__(self, module): self.module=module self.state=module.params['state'] - self.enabled=module.params['enabled'] + self.autoconnect=module.params['autoconnect'] self.conn_name=module.params['conn_name'] self.master=module.params['master'] - self.autoconnect=module.params['autoconnect'] self.ifname=module.params['ifname'] self.type=module.params['type'] self.ip4=module.params['ip4'] @@ -602,9 +605,9 @@ class Nmcli(object): if self.gw6 is not None: cmd.append('gw6') cmd.append(self.gw6) - if self.enabled is not None: + if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.enabled) + cmd.append(self.autoconnect) return cmd def modify_connection_team(self): @@ -631,9 +634,9 @@ class Nmcli(object): if self.dns6 is not None: cmd.append('ipv6.dns') cmd.append(self.dns6) - if self.enabled is not None: + if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.enabled) + cmd.append(self.autoconnect) # Can't use MTU with team return cmd @@ -704,9 +707,9 @@ class Nmcli(object): if self.gw6 is not None: cmd.append('gw6') cmd.append(self.gw6) - if self.enabled is not None: + if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.enabled) + cmd.append(self.autoconnect) if self.mode is not None: cmd.append('mode') cmd.append(self.mode) @@ -751,9 +754,9 @@ class Nmcli(object): if self.dns6 is not None: cmd.append('ipv6.dns') cmd.append(self.dns6) - if self.enabled is not None: + if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.enabled) + cmd.append(self.autoconnect) return cmd def create_connection_bond_slave(self): @@ -820,9 +823,9 @@ class Nmcli(object): if self.gw6 is not None: cmd.append('gw6') cmd.append(self.gw6) - if self.enabled is not None: + if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.enabled) + cmd.append(self.autoconnect) return cmd def modify_connection_ethernet(self): @@ -855,9 +858,9 @@ class Nmcli(object): if self.mtu is not None: cmd.append('802-3-ethernet.mtu') cmd.append(self.mtu) - if self.enabled is not None: + if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.enabled) + cmd.append(self.autoconnect) return cmd def create_connection_bridge(self): @@ -966,11 +969,10 @@ def main(): # Parsing argument file module=AnsibleModule( argument_spec=dict( - enabled=dict(required=False, default=None, choices=['yes', 'no'], type='str'), + autoconnect=dict(required=False, default=None, choices=['yes', 'no'], type='str'), state=dict(required=True, choices=['present', 'absent'], type='str'), - conn_name=dict(required=False, type='str'), + conn_name=dict(required=True, type='str'), 
master=dict(required=False, default=None, type='str'), - autoconnect=dict(required=False, default=None, choices=['yes', 'no'], type='str'), ifname=dict(required=False, default=None, type='str'), type=dict(required=False, default=None, choices=['ethernet', 'team', 'team-slave', 'bond', 'bond-slave', 'bridge', 'vlan'], type='str'), ip4=dict(required=False, default=None, type='str'), @@ -1009,12 +1011,6 @@ def main(): nmcli=Nmcli(module) - if nmcli.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Nmcli instantiated - platform %s' % nmcli.platform) - if nmcli.distribution: - syslog.syslog(syslog.LOG_NOTICE, 'Nuser instantiated - distribution %s' % nmcli.distribution) - rc=None out='' err='' From 31c63b6755ba2387a301cc9b83f1d3fe54cd1669 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Otto=20Kek=C3=A4l=C3=A4inen?= Date: Fri, 15 May 2015 16:47:23 +0300 Subject: [PATCH 589/720] gluster_volume: Typofix in docs (equals, not colon) --- system/gluster_volume.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 7b83c62297f..7d080f8bfe6 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -108,7 +108,7 @@ author: '"Taneli Leppä (@rosmo)" ' EXAMPLES = """ - name: create gluster volume - gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster:"{{ play_hosts }}" + gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster="{{ play_hosts }}" run_once: true - name: tune @@ -127,7 +127,7 @@ EXAMPLES = """ gluster_volume: state=absent name=test1 - name: create gluster volume with multiple bricks - gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster:"{{ play_hosts }}" + gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster="{{ play_hosts }}" run_once: true """ From 9cfe4697516ede70e7722c75adb676c628f7d102 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Otto=20Kek=C3=A4l=C3=A4inen?= Date: Fri, 15 May 2015 16:49:39 +0300 Subject: [PATCH 590/720] gluster_volume: Clarify error message to tell what actualy failed --- system/gluster_volume.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 7d080f8bfe6..cb7882f6c56 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -247,11 +247,11 @@ def wait_for_peer(host): time.sleep(1) return False -def probe(host): +def probe(host, myhostname): global module run_gluster([ 'peer', 'probe', host ]) if not wait_for_peer(host): - module.fail_json(msg='failed to probe peer %s' % host) + module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname)) changed = True def probe_all_peers(hosts, peers, myhostname): @@ -259,7 +259,7 @@ def probe_all_peers(hosts, peers, myhostname): if host not in peers: # dont probe ourselves if myhostname != host: - probe(host) + probe(host, myhostname) def create_volume(name, stripe, replica, transport, hosts, bricks, force): args = [ 'volume', 'create' ] From 02ed758af1de3b4603f179bd0ae4c9940cf4051b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Otto=20Kek=C3=A4l=C3=A4inen?= Date: Fri, 15 May 2015 17:24:18 +0300 Subject: [PATCH 591/720] gluster_volume: Parameter expects comma separated list of hosts, passing {{play_hosts}} will fail as Python does not parse it into a list --- system/gluster_volume.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
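As the commit message for patch 591 above explains, the gluster_volume cluster parameter expects a comma separated string of hosts, and passing a raw {{ play_hosts }} list will fail. One way to build such a string from an inventory group is to use the join filter; this is only a sketch, and the group name 'glusterfs', the volume name and the brick path are illustrative rather than taken from the patches:

    - name: create gluster volume across the glusterfs group
      gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 cluster="{{ groups['glusterfs'] | join(',') }}"
      run_once: true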
a/system/gluster_volume.py b/system/gluster_volume.py index cb7882f6c56..2ea6b974adc 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -108,7 +108,7 @@ author: '"Taneli Leppä (@rosmo)" ' EXAMPLES = """ - name: create gluster volume - gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster="{{ play_hosts }}" + gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster="192.168.1.10,192.168.1.11" run_once: true - name: tune @@ -127,7 +127,7 @@ EXAMPLES = """ gluster_volume: state=absent name=test1 - name: create gluster volume with multiple bricks - gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster="{{ play_hosts }}" + gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster="192.168.1.10,192.168.1.11" run_once: true """ From 0cec4527f0108e4374f00e75ca831b256f9c4248 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Otto=20Kek=C3=A4l=C3=A4inen?= Date: Fri, 15 May 2015 17:40:30 +0300 Subject: [PATCH 592/720] gluster_volume: Improved parsing of cluster parameter list --- system/gluster_volume.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 2ea6b974adc..c5d852731c5 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -256,6 +256,7 @@ def probe(host, myhostname): def probe_all_peers(hosts, peers, myhostname): for host in hosts: + host = host.strip() # Clean up any extra space for exact comparison if host not in peers: # dont probe ourselves if myhostname != host: @@ -347,6 +348,11 @@ def main(): if not myhostname: myhostname = socket.gethostname() + # Clean up if last element is empty. Consider that yml can look like this: + # cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}" + if cluster != None and cluster[-1] == '': + cluster = cluster[0:-1] + if brick_paths != None and "," in brick_paths: brick_paths = brick_paths.split(",") else: From c4afe9c5bbc67bba888e6bd7d7d305fa17181dd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Otto=20Kek=C3=A4l=C3=A4inen?= Date: Fri, 15 May 2015 17:55:16 +0300 Subject: [PATCH 593/720] gluster_volume: Finalize brick->bricks transition by previous author --- system/gluster_volume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index c5d852731c5..32359cd2a82 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -336,7 +336,7 @@ def main(): action = module.params['state'] volume_name = module.params['name'] cluster= module.params['cluster'] - brick_paths = module.params['brick'] + brick_paths = module.params['bricks'] stripes = module.params['stripes'] replicas = module.params['replicas'] transport = module.params['transport'] From 5b7b74eb66883b23e610fa7f4dcc63f3d2dc2577 Mon Sep 17 00:00:00 2001 From: Sebastian Kornehl Date: Tue, 19 May 2015 15:05:31 +0200 Subject: [PATCH 594/720] Added eval for pasting tag lists --- monitoring/datadog_event.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index 5319fcb0f1b..bde5cd80069 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -116,7 +116,10 @@ def post_event(module): if module.params['date_happened'] != None: body['date_happened'] = module.params['date_happened'] if module.params['tags'] != None: - body['tags'] = module.params['tags'].split(",") + if 
module.params['tags'].startswith("[") and module.params['tags'].endswith("]"): + body['tags'] = eval(module.params['tags']) + else: + body['tags'] = module.params['tags'].split(",") if module.params['aggregation_key'] != None: body['aggregation_key'] = module.params['aggregation_key'] if module.params['source_type_name'] != None: From 3761052597d67fe011485d055637505a0391ef51 Mon Sep 17 00:00:00 2001 From: Ernst Kuschke Date: Wed, 20 May 2015 16:34:21 +0200 Subject: [PATCH 595/720] Allow any custom chocolatey source This is to allow for a local source (for instance in the form of artifactory) --- windows/win_chocolatey.ps1 | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/windows/win_chocolatey.ps1 b/windows/win_chocolatey.ps1 index 22e0d83e77c..de42434da76 100644 --- a/windows/win_chocolatey.ps1 +++ b/windows/win_chocolatey.ps1 @@ -112,9 +112,9 @@ Else If ($params.source) { $source = $params.source.ToString().ToLower() - If (($source -ne "chocolatey") -and ($source -ne "webpi") -and ($source -ne "windowsfeatures") -and ($source -ne "ruby")) + If (($source -ne "chocolatey") -and ($source -ne "webpi") -and ($source -ne "windowsfeatures") -and ($source -ne "ruby") -and (!$source.startsWith("http://", "CurrentCultureIgnoreCase")) -and (!$source.startsWith("https://", "CurrentCultureIgnoreCase"))) { - Fail-Json $result "source is $source - must be one of chocolatey, ruby, webpi or windowsfeatures." + Fail-Json $result "source is $source - must be one of chocolatey, ruby, webpi, windowsfeatures or a custom source url." } } Elseif (!$params.source) @@ -190,6 +190,10 @@ elseif (($source -eq "windowsfeatures") -or ($source -eq "webpi") -or ($source - { $expression += " -source $source" } +elseif(($source -ne $Null) -and ($source -ne "")) +{ + $expression += " -source $source" +} Set-Attr $result "chocolatey command" $expression $op_result = invoke-expression $expression From b527380c6ac0833bff84625c9d6534ac37b9fdfc Mon Sep 17 00:00:00 2001 From: Christian Thiemann Date: Sun, 24 May 2015 02:05:38 +0200 Subject: [PATCH 596/720] Fix alternatives module in non-English locale The alternatives module parses the output of update-alternatives, but the expected English phrases may not show up if the system locale is not English. Setting LC_ALL=C when invoking update-alternatives fixes this problem. 
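With the datadog_event change above (patch 594), the tags parameter accepts either a comma separated string or a string formatted like a Python list, which the module then parses. Both task sketches below should end up as the same list of tags; the titles, texts and the api_key variable are illustrative:

    - datadog_event:
        title: Deployed web tier
        text: version 1.2.3 rolled out
        api_key: "{{ datadog_api_key }}"
        tags: "deploy,web"

    - datadog_event:
        title: Deployed web tier
        text: version 1.2.3 rolled out
        api_key: "{{ datadog_api_key }}"
        tags: "['deploy', 'web']"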
--- system/alternatives.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/alternatives.py b/system/alternatives.py index c298afc2949..06d9bea25f0 100644 --- a/system/alternatives.py +++ b/system/alternatives.py @@ -85,7 +85,7 @@ def main(): # Run `update-alternatives --display ` to find existing alternatives (rc, display_output, _) = module.run_command( - [UPDATE_ALTERNATIVES, '--display', name] + ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name] ) if rc == 0: @@ -106,7 +106,7 @@ def main(): # This is only compatible on Debian-based systems, as the other # alternatives don't have --query available rc, query_output, _ = module.run_command( - [UPDATE_ALTERNATIVES, '--query', name] + ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name] ) if rc == 0: for line in query_output.splitlines(): From 6f2b61d2d88294ea7938020183ea613b7e5e878d Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 27 May 2015 20:54:26 +0200 Subject: [PATCH 597/720] firewalld: remove BabyJSON See https://github.com/ansible/ansible-modules-extras/issues/430 --- system/firewalld.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/system/firewalld.py b/system/firewalld.py index 77cfc4b6bb8..e16e4e4a9dd 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -67,8 +67,8 @@ options: required: false default: 0 notes: - - Not tested on any debian based system. -requirements: [ firewalld >= 0.2.11 ] + - Not tested on any Debian based system. +requirements: [ 'firewalld >= 0.2.11' ] author: '"Adam Miller (@maxamillion)" ' ''' @@ -82,7 +82,6 @@ EXAMPLES = ''' import os import re -import sys try: import firewall.config @@ -90,14 +89,9 @@ try: from firewall.client import FirewallClient fw = FirewallClient() - if not fw.connected: - raise Exception('failed to connect to the firewalld daemon') + HAS_FIREWALLD = True except ImportError: - print "failed=True msg='firewalld required for this module'" - sys.exit(1) -except Exception, e: - print "failed=True msg='%s'" % str(e) - sys.exit(1) + HAS_FIREWALLD = False ################ # port handling @@ -223,6 +217,9 @@ def main(): supports_check_mode=True ) + if not HAS_FIREWALLD: + module.fail_json(msg='firewalld required for this module') + ## Pre-run version checking if FW_VERSION < "0.2.11": module.fail_json(msg='unsupported version of firewalld, requires >= 2.0.11') @@ -400,6 +397,4 @@ def main(): ################################################# # import module snippets from ansible.module_utils.basic import * - main() - From 1f322876262baa8c16f8439cd909a73743bba8c4 Mon Sep 17 00:00:00 2001 From: fdupoux Date: Thu, 28 May 2015 19:46:53 +0100 Subject: [PATCH 598/720] Removed conditional assignment of yesopt to make it work with python-2.4 (to pass the Travis-CI test) --- system/lvol.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index 43511ae7b7a..c49cb369440 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -127,7 +127,10 @@ def main(): if version_found == None: module.fail_json(msg="Failed to get LVM version number") version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option - yesopt = "--yes" if version_found >= version_yesopt else "" + if version_found >= version_yesopt: + yesopt = "--yes" + else: + yesopt = "" vg = module.params['vg'] lv = module.params['lv'] From 9d2d3f0299ad34ee8332bb179afd64bd9da245ae Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 28 May 2015 16:23:27 -0400 Subject: [PATCH 599/720] Add module to 
run puppet There is a growing pattern for using ansible to orchestrate runs of existing puppet code. For instance, the OpenStack Infrastructure team started using ansible for this very reason. It also turns out that successfully running puppet and interpreting success or failure is harder than you'd expect, thus warranting a module and not just a shell command. This is ported in from http://git.openstack.org/cgit/openstack-infra/ansible-puppet --- system/puppet.py | 186 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 system/puppet.py diff --git a/system/puppet.py b/system/puppet.py new file mode 100644 index 00000000000..c53c88f595d --- /dev/null +++ b/system/puppet.py @@ -0,0 +1,186 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +import json +import os +import pipes + +DOCUMENTATION = ''' +--- +module: puppet +short_description: Runs puppet +description: + - Runs I(puppet) agent or apply in a reliable manner +version_added: "2.0" +options: + timeout: + description: + - How long to wait for I(puppet) to finish. + required: false + default: 30m + puppetmaster: + description: + - The hostname of the puppetmaster to contact. Must have this or manifest + required: false + default: None + manifest: + desciption: + - Path to the manifest file to run puppet apply on. Must have this or puppetmaster + required: false + default: None + show_diff: + description: + - Should puppet return diffs of changes applied. Defaults to off to avoid leaking secret changes by default. 
+ required: false + default: no + choices: [ "yes", "no" ] + facts: + description: + - A dict of values to pass in as persistent external facter facts + required: false + default: None + facter_basename: + desciption: + - Basename of the facter output file + required: false + default: ansible +requirements: [ puppet ] +author: Monty Taylor +''' + +EXAMPLES = ''' +# Run puppet and fail if anything goes wrong +- puppet + +# Run puppet and timeout in 5 minutes +- puppet: timeout=5m +''' + + +def _get_facter_dir(): + if os.getuid() == 0: + return '/etc/facter/facts.d' + else: + return os.path.expanduser('~/.facter/facts.d') + + +def _write_structured_data(basedir, basename, data): + if not os.path.exists(basedir): + os.makedirs(basedir) + file_path = os.path.join(basedir, "{0}.json".format(basename)) + with os.fdopen( + os.open(file_path, os.O_CREAT | os.O_WRONLY, 0o600), + 'w') as out_file: + out_file.write(json.dumps(data).encode('utf8')) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + timeout=dict(default="30m"), + puppetmaster=dict(required=False, default=None), + manifest=dict(required=False, default=None), + show_diff=dict( + default=False, aliases=['show-diff'], type='bool'), + facts=dict(default=None), + facter_basename=dict(default='ansible'), + ), + required_one_of=[ + ('puppetmaster', 'manifest'), + ], + ) + p = module.params + + global PUPPET_CMD + PUPPET_CMD = module.get_bin_path("puppet", False) + + if not PUPPET_CMD: + module.fail_json( + msg="Could not find puppet. Please ensure it is installed.") + + if p['manifest']: + if not os.path.exists(p['manifest']): + module.fail_json( + msg="Manifest file %(manifest)s not found." % dict( + manifest=p['manifest']) + + # Check if puppet is disabled here + if p['puppetmaster']: + rc, stdout, stderr = module.run_command( + PUPPET_CMD + "config print agent_disabled_lockfile") + if os.path.exists(stdout.strip()): + module.fail_json( + msg="Puppet agent is administratively disabled.", disabled=True) + elif rc != 0: + module.fail_json( + msg="Puppet agent state could not be determined.") + + if module.params['facts']: + _write_structured_data( + _get_facter_dir(), + module.params['facter_basename'], + module.params['facts']) + + base_cmd = "timeout -s 9 %(timeout)s %(puppet_cmd)s" % dict( + timeout=pipes.quote(p['timeout']), puppet_cmd=PUPPET_CMD) + + if p['puppetmaster']: + cmd = ("%(base_cmd) agent --onetime" + " --server %(puppetmaster)s" + " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay" + " --detailed-exitcodes --verbose") % dict( + base_cmd=base_cmd, + puppetmaster=pipes.quote(p['puppetmaster'])) + if p['show_diff']: + cmd += " --show-diff" + else: + cmd = ("%(base_cmd) apply --detailed-exitcodes %(manifest)s" % dict( + base_cmd=base_cmd, + manifest=pipes.quote(p['manifest'])) + rc, stdout, stderr = module.run_command(cmd) + + if rc == 0: + # success + module.exit_json(rc=rc, changed=False, stdout=stdout) + elif rc == 1: + # rc==1 could be because it's disabled + # rc==1 could also mean there was a compilation failure + disabled = "administratively disabled" in stdout + if disabled: + msg = "puppet is disabled" + else: + msg = "puppet did not run" + module.exit_json( + rc=rc, disabled=disabled, msg=msg, + error=True, stdout=stdout, stderr=stderr) + elif rc == 2: + # success with changes + module.exit_json(rc=0, changed=True) + elif rc == 124: + # timeout + module.exit_json( + rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr) + else: + # failure + module.fail_json( + rc=rc, msg="%s failed 
with return code: %d" % (cmd, rc), + stdout=stdout, stderr=stderr) + +# import module snippets +from ansible.module_utils.basic import * + +main() From a1ecd60285c91466577463069a8cf9c6739813b1 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 29 May 2015 07:00:30 -0400 Subject: [PATCH 600/720] Fix some errors pointed out by travis --- system/puppet.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/system/puppet.py b/system/puppet.py index c53c88f595d..57c76eeec9f 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -82,10 +82,10 @@ def _write_structured_data(basedir, basename, data): if not os.path.exists(basedir): os.makedirs(basedir) file_path = os.path.join(basedir, "{0}.json".format(basename)) - with os.fdopen( - os.open(file_path, os.O_CREAT | os.O_WRONLY, 0o600), - 'w') as out_file: - out_file.write(json.dumps(data).encode('utf8')) + out_file = os.fdopen( + os.open(file_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') + out_file.write(json.dumps(data).encode('utf8')) + out_file.close() def main(): @@ -116,7 +116,7 @@ def main(): if not os.path.exists(p['manifest']): module.fail_json( msg="Manifest file %(manifest)s not found." % dict( - manifest=p['manifest']) + manifest=p['manifest'])) # Check if puppet is disabled here if p['puppetmaster']: @@ -149,8 +149,8 @@ def main(): cmd += " --show-diff" else: cmd = ("%(base_cmd) apply --detailed-exitcodes %(manifest)s" % dict( - base_cmd=base_cmd, - manifest=pipes.quote(p['manifest'])) + base_cmd=base_cmd, + manifest=pipes.quote(p['manifest']))) rc, stdout, stderr = module.run_command(cmd) if rc == 0: From e7ed08f762188244406e5fe1252c34ee64dcbfe7 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 29 May 2015 07:06:15 -0400 Subject: [PATCH 601/720] Add support for check mode --- system/puppet.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/system/puppet.py b/system/puppet.py index 57c76eeec9f..d6bc4348375 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -99,6 +99,7 @@ def main(): facts=dict(default=None), facter_basename=dict(default='ansible'), ), + supports_check_mode=True, required_one_of=[ ('puppetmaster', 'manifest'), ], @@ -129,7 +130,7 @@ def main(): module.fail_json( msg="Puppet agent state could not be determined.") - if module.params['facts']: + if module.params['facts'] and not module.check_mode: _write_structured_data( _get_facter_dir(), module.params['facter_basename'], @@ -139,7 +140,7 @@ def main(): timeout=pipes.quote(p['timeout']), puppet_cmd=PUPPET_CMD) if p['puppetmaster']: - cmd = ("%(base_cmd) agent --onetime" + cmd = ("%(base_cmd)s agent --onetime" " --server %(puppetmaster)s" " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay" " --detailed-exitcodes --verbose") % dict( @@ -147,10 +148,13 @@ def main(): puppetmaster=pipes.quote(p['puppetmaster'])) if p['show_diff']: cmd += " --show-diff" + if module.check_mode: + cmd += " --noop" else: - cmd = ("%(base_cmd) apply --detailed-exitcodes %(manifest)s" % dict( - base_cmd=base_cmd, - manifest=pipes.quote(p['manifest']))) + cmd = "%s apply --detailed-exitcodes " % base_cmd + if module.check_mode: + cmd += "--noop " + cmd += pipes.quote(p['manifest']) rc, stdout, stderr = module.run_command(cmd) if rc == 0: From ce93a91a590a9eddeaddcfe9601db4f7f08b1cf6 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 29 May 2015 08:09:31 -0400 Subject: [PATCH 602/720] Fix octal values for python 2.4 --- system/puppet.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git 
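The puppet module introduced in patches 599 through 601 above accepts either a puppetmaster or a local manifest, honours check mode via --noop, and can write external facter facts before the run. A usage sketch with illustrative paths, hostname and fact values (show_diff only takes effect in the puppetmaster case, matching the module code):

    - puppet:
        manifest: /opt/puppet/manifests/site.pp
        timeout: 10m
        facts:
          deploy_environment: staging
          managed_by: ansible

    - puppet:
        puppetmaster: puppet.example.com
        show_diff: yes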
a/system/puppet.py b/system/puppet.py index d6bc4348375..46a5ea58d4f 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -18,6 +18,7 @@ import json import os import pipes +import stat DOCUMENTATION = ''' --- @@ -82,8 +83,13 @@ def _write_structured_data(basedir, basename, data): if not os.path.exists(basedir): os.makedirs(basedir) file_path = os.path.join(basedir, "{0}.json".format(basename)) + # This is more complex than you might normally expect because we want to + # open the file with only u+rw set. Also, we use the stat constants + # because ansible still supports python 2.4 and the octal syntax changed out_file = os.fdopen( - os.open(file_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') + os.open( + file_path, os.O_CREAT | os.O_WRONLY, + stat.S_IRUSR | stat.S_IWUSR), 'w') out_file.write(json.dumps(data).encode('utf8')) out_file.close() From 93a1542cc1b0d954b8877f06ba10bd802447977c Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 29 May 2015 10:07:00 +0200 Subject: [PATCH 603/720] cloudstack: improve required params --- cloud/cloudstack/cs_account.py | 3 +++ cloud/cloudstack/cs_affinitygroup.py | 3 +++ cloud/cloudstack/cs_firewall.py | 7 +++++++ cloud/cloudstack/cs_instance.py | 3 +++ cloud/cloudstack/cs_instancegroup.py | 3 +++ cloud/cloudstack/cs_iso.py | 3 +++ cloud/cloudstack/cs_portforward.py | 3 +++ cloud/cloudstack/cs_securitygroup.py | 3 +++ cloud/cloudstack/cs_securitygroup_rule.py | 4 ++++ cloud/cloudstack/cs_sshkeypair.py | 3 +++ cloud/cloudstack/cs_vmsnapshot.py | 4 ++++ 11 files changed, 39 insertions(+) diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index 399dfa090cc..a8510bbc5b3 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -369,6 +369,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 2a8de46fe41..9ff3b123a0c 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -223,6 +223,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index c9e42be4a4f..ef78b6a242d 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -422,6 +422,13 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_one_of = ( + ['ip_address', 'network'], + ), + required_together = ( + ['icmp_type', 'icmp_code'], + ['api_key', 'api_secret', 'api_url'], + ), mutually_exclusive = ( ['icmp_type', 'start_port'], ['icmp_type', 'end_port'], diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 1f5cc6ca393..c2c219febac 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -788,6 +788,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index d62004cc94f..9041e351539 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -200,6 +200,9 @@ def main(): api_url = dict(default=None), 
api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 749acdf594a..4a97fc3d027 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -333,6 +333,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 123da67e2bc..47af7848ee1 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -407,6 +407,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 73a54fef795..9ef81095322 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -167,6 +167,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index ef48b3896ce..a467d3f5c38 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -402,6 +402,10 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['icmp_type', 'icmp_code'], + ['api_key', 'api_secret', 'api_url'], + ), mutually_exclusive = ( ['icmp_type', 'start_port'], ['icmp_type', 'end_port'], diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 0d2e2c822f1..e7ee88e3bea 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -219,6 +219,9 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index b71901a317f..cadf229af55 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -292,6 +292,10 @@ def main(): api_url = dict(default=None), api_http_method = dict(default='get'), ), + required_together = ( + ['icmp_type', 'icmp_code'], + ['api_key', 'api_secret', 'api_url'], + ), supports_check_mode=True ) From 7442db3f4160aebf0ac4241e38c89ba52e6a6f4e Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 00:24:34 +0200 Subject: [PATCH 604/720] cs_instance: improve hypervisor argument and return --- cloud/cloudstack/cs_instance.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index c2c219febac..734ffb62d46 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -326,6 +326,11 @@ tags: returned: success type: dict sample: '[ { "key": "foo", "value": "bar" } ]' +hypervisor: + description: Hypervisor related to this instance. 
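Patch 603 above ties api_key, api_secret and api_url together as required_together, and patch 604 restricts hypervisor to a fixed set of choices. A task that passes the API credentials explicitly might look like the following sketch; the endpoint URL, credential variables, template and offering names are placeholders:

    - local_action:
        module: cs_instance
        name: web-vm-1
        template: Linux Debian 7 64-bit
        service_offering: Tiny
        hypervisor: KVM
        api_url: https://cloud.example.com/client/api
        api_key: "{{ cs_api_key }}"
        api_secret: "{{ cs_api_secret }}"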
+ returned: success + type: string + sample: KVM ''' import base64 @@ -712,6 +717,8 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): self.result['account'] = instance['account'] if 'project' in instance: self.result['project'] = instance['project'] + if 'hypervisor' in instance: + self.result['hypervisor'] = instance['hypervisor'] if 'publicip' in instance: self.result['public_ip'] = instance['public_ip'] if 'passwordenabled' in instance: @@ -771,7 +778,7 @@ def main(): disk_offering = dict(default=None), disk_size = dict(type='int', default=None), keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None), - hypervisor = dict(default=None), + hypervisor = dict(choices=['KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM'], default=None), security_groups = dict(type='list', aliases=[ 'security_group' ], default=[]), affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]), domain = dict(default=None), From a13a26aa2a6f52ae5da592933f2f33f2fe59c6b4 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 00:26:00 +0200 Subject: [PATCH 605/720] cloudstack: add instance_name alias internal name to returns in cs_instance --- cloud/cloudstack/cs_instance.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 734ffb62d46..13fc57991d3 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -331,6 +331,11 @@ hypervisor: returned: success type: string sample: KVM +instance_name: + description: Internal name of the instance (ROOT admin only). + returned: success + type: string + sample: i-44-3992-VM ''' import base64 @@ -719,6 +724,8 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): self.result['project'] = instance['project'] if 'hypervisor' in instance: self.result['hypervisor'] = instance['hypervisor'] + if 'instancename' in instance: + self.result['instance_name'] = instance['instancename'] if 'publicip' in instance: self.result['public_ip'] = instance['public_ip'] if 'passwordenabled' in instance: From e143689d9c3c086319c5038de3baf09dbc803c2f Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 00:28:06 +0200 Subject: [PATCH 606/720] cloudstack: update doc in cs_instance --- cloud/cloudstack/cs_instance.py | 36 ++++++++++++++++----------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 13fc57991d3..c2dd45fe2b5 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -23,7 +23,7 @@ DOCUMENTATION = ''' module: cs_instance short_description: Manages instances and virtual machines on Apache CloudStack based clouds. description: - - Deploy, start, restart, stop and destroy instances on Apache CloudStack, Citrix CloudPlatform and Exoscale. + - Deploy, start, restart, stop and destroy instances. version_added: '2.0' author: '"René Moser (@resmo)" ' options: @@ -49,22 +49,29 @@ options: choices: [ 'deployed', 'started', 'stopped', 'restarted', 'destroyed', 'expunged', 'present', 'absent' ] service_offering: description: - - Name or id of the service offering of the new instance. If not set, first found service offering is used. + - Name or id of the service offering of the new instance. + - If not set, first found service offering is used. 
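Patches 604 and 605 above add hypervisor and instance_name (ROOT admin only) to the values returned by cs_instance. When the API reports them, a registered result can be inspected as usual; the register variable, message, template and offering names below are arbitrary placeholders:

    - local_action:
        module: cs_instance
        name: web-vm-1
        template: Linux Debian 7 64-bit
        service_offering: Tiny
      register: vm

    - debug:
        msg: "{{ vm.name }} runs on {{ vm.hypervisor }} as {{ vm.instance_name }}"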
required: false default: null template: description: - - Name or id of the template to be used for creating the new instance. Required when using C(state=present). Mutually exclusive with C(ISO) option. + - Name or id of the template to be used for creating the new instance. + - Required when using C(state=present). + - Mutually exclusive with C(ISO) option. required: false default: null iso: description: - - Name or id of the ISO to be used for creating the new instance. Required when using C(state=present). Mutually exclusive with C(template) option. + - Name or id of the ISO to be used for creating the new instance. + - Required when using C(state=present). + - Mutually exclusive with C(template) option. required: false default: null hypervisor: description: - - Name the hypervisor to be used for creating the new instance. Relevant when using C(state=present) and option C(ISO) is used. If not set, first found hypervisor will be used. + - Name the hypervisor to be used for creating the new instance. + - Relevant when using C(state=present) and option C(ISO) is used. + - If not set, first found hypervisor will be used. required: false default: null choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] @@ -82,7 +89,7 @@ options: aliases: [ 'network' ] ip_address: description: - - IPv4 address for default instance's network during creation + - IPv4 address for default instance's network during creation. required: false default: null ip6_address: @@ -123,7 +130,8 @@ options: default: null zone: description: - - Name of the zone in which the instance shoud be deployed. If not set, default zone is used. + - Name of the zone in which the instance shoud be deployed. + - If not set, default zone is used. required: false default: null ssh_key: @@ -164,7 +172,7 @@ extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' -# Create a instance on CloudStack from an ISO +# Create a instance from an ISO # NOTE: Names of offerings and ISOs depending on the CloudStack configuration. - local_action: module: cs_instance @@ -181,7 +189,6 @@ EXAMPLES = ''' - Sync Integration - Storage Integration - # For changing a running instance, use the 'force' parameter - local_action: module: cs_instance @@ -191,7 +198,6 @@ EXAMPLES = ''' service_offering: 2cpu_2gb force: yes - # Create or update a instance on Exoscale's public cloud - local_action: module: cs_instance @@ -202,19 +208,13 @@ EXAMPLES = ''' tags: - { key: admin, value: john } - { key: foo, value: bar } - register: vm - -- debug: msg='default ip {{ vm.default_ip }} and is in state {{ vm.state }}' - # Ensure a instance has stopped - local_action: cs_instance name=web-vm-1 state=stopped - # Ensure a instance is running - local_action: cs_instance name=web-vm-1 state=started - # Remove a instance - local_action: cs_instance name=web-vm-1 state=absent ''' @@ -257,7 +257,7 @@ password: type: string sample: Ge2oe7Do ssh_key: - description: Name of ssh key deployed to instance. + description: Name of SSH key deployed to instance. returned: success type: string sample: key@work @@ -282,7 +282,7 @@ default_ip: type: string sample: 10.23.37.42 public_ip: - description: Public IP address with instance via static nat rule. + description: Public IP address with instance via static NAT rule. 
returned: success type: string sample: 1.2.3.4 From 01caf84227afc7a100e64017bce380b86819fd18 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 00:46:20 +0200 Subject: [PATCH 607/720] cloudstack: update doc of cs_portforward, fixes typos. --- cloud/cloudstack/cs_portforward.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 47af7848ee1..cbd363f69e6 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -92,12 +92,13 @@ options: default: null project: description: - - Name of the project the c(vm) is located in. + - Name of the project the C(vm) is located in. required: false default: null zone: description: - - Name of the zone in which the virtual machine is in. If not set, default zone is used. + - Name of the zone in which the virtual machine is in. + - If not set, default zone is used. required: false default: null poll_async: @@ -117,7 +118,6 @@ EXAMPLES = ''' public_port: 80 private_port: 8080 - # forward SSH and open firewall - local_action: module: cs_portforward @@ -127,7 +127,6 @@ EXAMPLES = ''' private_port: 22 open_firewall: true - # forward DNS traffic, but do not open firewall - local_action: module: cs_portforward @@ -138,7 +137,6 @@ EXAMPLES = ''' protocol: udp open_firewall: true - # remove ssh port forwarding - local_action: module: cs_portforward @@ -161,26 +159,26 @@ protocol: type: string sample: tcp private_port: - description: Private start port. + description: Start port on the virtual machine's IP address. returned: success type: int sample: 80 private_end_port: - description: Private end port. + description: End port on the virtual machine's IP address. returned: success type: int public_port: - description: Public start port. + description: Start port on the public IP address. returned: success type: int sample: 80 public_end_port: - description: Public end port. + description: End port on the public IP address. returned: success type: int sample: 80 tags: - description: Tag srelated to the port forwarding. + description: Tags related to the port forwarding. returned: success type: list sample: [] @@ -201,7 +199,6 @@ vm_guest_ip: sample: 10.101.65.152 ''' - try: from cs import CloudStack, CloudStackException, read_config has_lib_cs = True From 80663e0fbeaf081d25b3d5691e2bc8f12b86ac8c Mon Sep 17 00:00:00 2001 From: mlamatr Date: Fri, 29 May 2015 23:18:44 -0400 Subject: [PATCH 608/720] corrected typo in URL for consul.io --- clustering/consul.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clustering/consul.py b/clustering/consul.py index 0baaae83b84..8423ffe418f 100644 --- a/clustering/consul.py +++ b/clustering/consul.py @@ -20,7 +20,7 @@ DOCUMENTATION = """ module: consul short_description: "Add, modify & delete services within a consul cluster. - See http://conul.io for more details." + See http://consul.io for more details." description: - registers services and checks for an agent with a consul cluster. 
A service is some process running on the agent node that should be advertised by From eb66f683f538c1fe73902e246de7e8d3bec29c5d Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 11:03:32 +0200 Subject: [PATCH 609/720] cloudstack: add new param api_timeout --- cloud/cloudstack/cs_account.py | 1 + cloud/cloudstack/cs_affinitygroup.py | 1 + cloud/cloudstack/cs_firewall.py | 1 + cloud/cloudstack/cs_instance.py | 1 + cloud/cloudstack/cs_instancegroup.py | 1 + cloud/cloudstack/cs_iso.py | 1 + cloud/cloudstack/cs_portforward.py | 1 + cloud/cloudstack/cs_securitygroup.py | 1 + cloud/cloudstack/cs_securitygroup_rule.py | 1 + cloud/cloudstack/cs_sshkeypair.py | 1 + cloud/cloudstack/cs_vmsnapshot.py | 1 + 11 files changed, 11 insertions(+) diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index a8510bbc5b3..dc845acbae2 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -368,6 +368,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 9ff3b123a0c..afb60a83baa 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -222,6 +222,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index ef78b6a242d..fca8e88a509 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -421,6 +421,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_one_of = ( ['ip_address', 'network'], diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index c2dd45fe2b5..b6f2d098346 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -801,6 +801,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index 9041e351539..01630bc225f 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -199,6 +199,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 4a97fc3d027..f38faeceeb4 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -332,6 +332,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 
cbd363f69e6..e3a456e424b 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -403,6 +403,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 9ef81095322..8f1592ca43a 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -166,6 +166,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index a467d3f5c38..7afb1463503 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -401,6 +401,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['icmp_type', 'icmp_code'], diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index e7ee88e3bea..b4b764dbe33 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -218,6 +218,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index cadf229af55..218a947ac5a 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -291,6 +291,7 @@ def main(): api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(default='get'), + api_timeout = dict(type='int', default=10), ), required_together = ( ['icmp_type', 'icmp_code'], From 53130de66267068b6dc607f4a44200eee1581f96 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 11:05:03 +0200 Subject: [PATCH 610/720] cloudstack: add choices for api_http_method --- cloud/cloudstack/cs_account.py | 6 +----- cloud/cloudstack/cs_affinitygroup.py | 3 +-- cloud/cloudstack/cs_firewall.py | 6 +----- cloud/cloudstack/cs_instance.py | 2 +- cloud/cloudstack/cs_instancegroup.py | 3 +-- cloud/cloudstack/cs_iso.py | 5 +---- cloud/cloudstack/cs_portforward.py | 2 +- cloud/cloudstack/cs_securitygroup.py | 3 +-- cloud/cloudstack/cs_securitygroup_rule.py | 6 +----- cloud/cloudstack/cs_sshkeypair.py | 2 +- cloud/cloudstack/cs_vmsnapshot.py | 4 +--- 11 files changed, 11 insertions(+), 31 deletions(-) diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index dc845acbae2..597e4c7394e 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -108,7 +108,6 @@ local_action: email: john.doe@example.com domain: CUSTOMERS - # Lock an existing account in domain 'CUSTOMERS' local_action: module: cs_account @@ -116,7 +115,6 @@ local_action: domain: CUSTOMERS state: locked - # Disable an existing account in domain 'CUSTOMERS' local_action: module: cs_account @@ -124,7 +122,6 @@ local_action: domain: CUSTOMERS state: disabled - # Enable an existing account 
in domain 'CUSTOMERS' local_action: module: cs_account @@ -132,7 +129,6 @@ local_action: domain: CUSTOMERS state: enabled - # Remove an account in domain 'CUSTOMERS' local_action: module: cs_account @@ -367,7 +363,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index afb60a83baa..40896942cb1 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -72,7 +72,6 @@ EXAMPLES = ''' name: haproxy affinty_type: host anti-affinity - # Remove a affinity group - local_action: module: cs_affinitygroup @@ -221,7 +220,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index fca8e88a509..828aa1faf98 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -115,7 +115,6 @@ EXAMPLES = ''' port: 80 cidr: 1.2.3.4/32 - # Allow inbound tcp/udp port 53 to 4.3.2.1 - local_action: module: cs_firewall @@ -126,7 +125,6 @@ EXAMPLES = ''' - tcp - udp - # Ensure firewall rule is removed - local_action: module: cs_firewall @@ -136,7 +134,6 @@ EXAMPLES = ''' cidr: 17.0.0.0/8 state: absent - # Allow all outbound traffic - local_action: module: cs_firewall @@ -144,7 +141,6 @@ EXAMPLES = ''' type: egress protocol: all - # Allow only HTTP outbound traffic for an IP - local_action: module: cs_firewall @@ -420,7 +416,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_one_of = ( diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index b6f2d098346..05cdc960e95 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -800,7 +800,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index 01630bc225f..396cafa388d 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -61,7 +61,6 @@ EXAMPLES = ''' module: cs_instancegroup name: loadbalancers - # Remove an instance group - local_action: module: cs_instancegroup @@ -198,7 +197,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index f38faeceeb4..77ce85b505e 100644 --- a/cloud/cloudstack/cs_iso.py 
+++ b/cloud/cloudstack/cs_iso.py @@ -116,7 +116,6 @@ EXAMPLES = ''' url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso os_type: Debian GNU/Linux 7(64-bit) - # Register an ISO with given name if ISO md5 checksum does not already exist. - local_action: module: cs_iso @@ -125,14 +124,12 @@ EXAMPLES = ''' os_type: checksum: 0b31bccccb048d20b551f70830bb7ad0 - # Remove an ISO by name - local_action: module: cs_iso name: Debian 7 64-bit state: absent - # Remove an ISO by checksum - local_action: module: cs_iso @@ -331,7 +328,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index e3a456e424b..00b084d9195 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -402,7 +402,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 8f1592ca43a..08fb72c821d 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -57,7 +57,6 @@ EXAMPLES = ''' name: default description: default security group - # Remove a security group - local_action: module: cs_securitygroup @@ -165,7 +164,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 7afb1463503..9252e06ce62 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -102,7 +102,6 @@ EXAMPLES = ''' port: 80 cidr: 1.2.3.4/32 - # Allow tcp/udp outbound added to security group 'default' - local_action: module: cs_securitygroup_rule @@ -115,7 +114,6 @@ EXAMPLES = ''' - tcp - udp - # Allow inbound icmp from 0.0.0.0/0 added to security group 'default' - local_action: module: cs_securitygroup_rule @@ -124,7 +122,6 @@ EXAMPLES = ''' icmp_code: -1 icmp_type: -1 - # Remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default' - local_action: module: cs_securitygroup_rule @@ -132,7 +129,6 @@ EXAMPLES = ''' port: 80 state: absent - # Allow inbound port 80/tcp from security group web added to security group 'default' - local_action: module: cs_securitygroup_rule @@ -400,7 +396,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index b4b764dbe33..0a54a1971bc 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -217,7 +217,7 @@ 
def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index 218a947ac5a..fb7668640dc 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -88,7 +88,6 @@ EXAMPLES = ''' vm: web-01 snapshot_memory: yes - # Revert a VM to a snapshot after a failed upgrade - local_action: module: cs_vmsnapshot @@ -96,7 +95,6 @@ EXAMPLES = ''' vm: web-01 state: revert - # Remove a VM snapshot after successful upgrade - local_action: module: cs_vmsnapshot @@ -290,7 +288,7 @@ def main(): api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), - api_http_method = dict(default='get'), + api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( From 71066331f31c8619f0bdcf8029e56a27979644c6 Mon Sep 17 00:00:00 2001 From: Q Date: Sat, 30 May 2015 23:01:52 +1000 Subject: [PATCH 611/720] Update patch.py --- files/patch.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/files/patch.py b/files/patch.py index c2982e2380e..0932ed3556a 100644 --- a/files/patch.py +++ b/files/patch.py @@ -65,6 +65,13 @@ options: required: false type: "int" default: "0" + backup_copy: + description: + - passes --backup --version-control=numbered to patch, + producing numbered backup copies + required: false + type: "bool" + default: "False" note: - This module requires GNU I(patch) utility to be installed on the remote host. 
''' @@ -101,7 +108,7 @@ def is_already_applied(patch_func, patch_file, basedir, dest_file=None, strip=0) return rc == 0 -def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False): +def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False, backup=False): opts = ['--quiet', '--forward', '--batch', '--reject-file=-', "--strip=%s" % strip, "--directory='%s'" % basedir, "--input='%s'" % patch_file] @@ -109,6 +116,8 @@ def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_ru opts.append('--dry-run') if dest_file: opts.append("'%s'" % dest_file) + if backup: + opts.append('--backup --version-control=numbered') (rc, out, err) = patch_func(opts) if rc != 0: @@ -124,6 +133,8 @@ def main(): 'basedir': {}, 'strip': {'default': 0, 'type': 'int'}, 'remote_src': {'default': False, 'type': 'bool'}, + # don't call it "backup" since the semantics differs from the default one + 'backup_copy': { 'default': False, 'type': 'bool' } }, required_one_of=[['dest', 'basedir']], supports_check_mode=True @@ -156,8 +167,8 @@ def main(): changed = False if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip): try: - apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip, - dry_run=module.check_mode) + apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip, + dry_run=module.check_mode, backup=p.backup_copy ) changed = True except PatchError, e: module.fail_json(msg=str(e)) From 79a5ea2ca61ffb9ec6dec0113c1cc004f935189a Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 11:05:36 +0200 Subject: [PATCH 612/720] cloudstack: fix examples in cs_iso --- cloud/cloudstack/cs_iso.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 77ce85b505e..d9ec6880627 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -121,7 +121,7 @@ EXAMPLES = ''' module: cs_iso name: Debian 7 64-bit url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso - os_type: + os_type: Debian GNU/Linux 7(64-bit) checksum: 0b31bccccb048d20b551f70830bb7ad0 # Remove an ISO by name From 421b3ff24ebe240054f172d396c869f51cf08ae1 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 18:28:41 +0200 Subject: [PATCH 613/720] cloudstack: fix doc for cs_instance, force is defaulted to false --- cloud/cloudstack/cs_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 05cdc960e95..46fd66f510d 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -156,7 +156,7 @@ options: description: - Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed. required: false - default: true + default: false tags: description: - List of tags. Tags are a list of dictionaries having keys C(key) and C(value). 
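The patch module gains a backup_copy option in patch 611 above, which passes --backup --version-control=numbered to GNU patch so numbered backup copies of the modified files are kept. A minimal task sketch, with illustrative file and directory names:

    - patch:
        src: fix-logging.patch
        basedir: /opt/app/src
        strip: 1
        backup_copy: yes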
From e1006eb9077baad7d8c79b32608254269b2fd4ad Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 22:54:56 +0200 Subject: [PATCH 614/720] cloudstack: add new module cs_project --- cloud/cloudstack/cs_project.py | 342 +++++++++++++++++++++++++++++++++ 1 file changed, 342 insertions(+) create mode 100644 cloud/cloudstack/cs_project.py diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py new file mode 100644 index 00000000000..b604a1b6f32 --- /dev/null +++ b/cloud/cloudstack/cs_project.py @@ -0,0 +1,342 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_project +short_description: Manages projects on Apache CloudStack based clouds. +description: + - Create, update, suspend, activate and remove projects. +version_added: '2.0' +author: '"René Moser (@resmo)" ' + name: + description: + - Name of the project. + required: true + displaytext: + description: + - Displaytext of the project. + - If not specified, C(name) will be used as displaytext. + required: false + default: null + state: + description: + - State of the project. + required: false + default: 'present' + choices: [ 'present', 'absent', 'active', 'suspended' ] + domain: + description: + - Domain the project is related to. + required: false + default: null + account: + description: + - Account the project is related to. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Create a project +- local_action: + module: cs_project + name: web + +# Rename a project +- local_action: + module: cs_project + name: web + displaytext: my web project + +# Suspend an existing project +- local_action: + module: cs_project + name: web + state: suspended + +# Activate an existing project +- local_action: + module: cs_project + name: web + state: active + +# Remove a project +- local_action: + module: cs_project + name: web + state: absent +''' + +RETURN = ''' +--- +id: + description: ID of the project. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the project. + returned: success + type: string + sample: web project +displaytext: + description: Display text of the project. + returned: success + type: string + sample: web project +state: + description: State of the project. + returned: success + type: string + sample: Active +domain: + description: Domain the project is related to. + returned: success + type: string + sample: example domain +account: + description: Account the project is related to. + returned: success + type: string + sample: example account +tags: + description: List of resource tags associated with the project. 
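The tags entry documented above is returned as a list of key/value dictionaries; CloudStack itself hands back richer tag objects, which the module flattens later in get_result(). A small sketch of that normalization, using a made-up API payload:

def normalize_tags(project):
    # Reduce CloudStack tag objects to plain key/value dicts (sketch of the
    # tag handling in get_result() further down).
    result = []
    for tag in project.get('tags', []):
        result.append({'key': tag['key'], 'value': tag['value']})
    return result

sample_project = {'tags': [{'key': 'foo', 'value': 'bar', 'resourcetype': 'Project'}]}  # made-up payload
print(normalize_tags(sample_project))  # [{'key': 'foo', 'value': 'bar'}]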
+ returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackProject(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.project = None + + + def get_displaytext(self): + displaytext = self.module.params.get('displaytext') + if not displaytext: + displaytext = self.module.params.get('name') + return displaytext + + + def get_project(self): + if not self.project: + project = self.module.params.get('name') + + args = {} + args['listall'] = True + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + + projects = self.cs.listProjects(**args) + if projects: + for p in projects['project']: + if project in [ p['name'], p['id']]: + self.project = p + break + return self.project + + + def present_project(self): + project = self.get_project() + if not project: + project = self.create_project(project) + else: + project = self.update_project(project) + return project + + + def update_project(self, project): + args = {} + args['id'] = project['id'] + args['displaytext'] = self.get_displaytext() + + if self._has_changed(args, project): + self.result['changed'] = True + if not self.module.check_mode: + project = self.cs.updateProject(**args) + + if 'errortext' in project: + self.module.fail_json(msg="Failed: '%s'" % project['errortext']) + + poll_async = self.module.params.get('poll_async') + if project and poll_async: + project = self._poll_job(project, 'project') + return project + + + def create_project(self, project): + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['displaytext'] = self.get_displaytext() + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + + if not self.module.check_mode: + project = self.cs.createProject(**args) + + if 'errortext' in project: + self.module.fail_json(msg="Failed: '%s'" % project['errortext']) + + poll_async = self.module.params.get('poll_async') + if project and poll_async: + project = self._poll_job(project, 'project') + return project + + + def state_project(self, state=None): + project = self.get_project() + + if not project: + self.module.fail_json(msg="No project named '%s' found." 
% self.module.params('name')) + + if project['state'].lower() != state: + self.result['changed'] = True + + args = {} + args['id'] = project['id'] + + if not self.module.check_mode: + if state == 'suspended': + project = self.cs.suspendProject(**args) + else: + project = self.cs.activateProject(**args) + + if 'errortext' in project: + self.module.fail_json(msg="Failed: '%s'" % project['errortext']) + + poll_async = self.module.params.get('poll_async') + if project and poll_async: + project = self._poll_job(project, 'project') + return project + + + def absent_project(self): + project = self.get_project() + if project: + self.result['changed'] = True + + args = {} + args['id'] = project['id'] + + if not self.module.check_mode: + res = self.cs.deleteProject(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'project') + return project + + + def get_result(self, project): + if project: + if 'name' in project: + self.result['name'] = project['name'] + if 'displaytext' in project: + self.result['displaytext'] = project['displaytext'] + if 'account' in project: + self.result['account'] = project['account'] + if 'domain' in project: + self.result['domain'] = project['domain'] + if 'state' in project: + self.result['state'] = project['state'] + if 'tags' in project: + self.result['tags'] = [] + for tag in project['tags']: + result_tag = {} + result_tag['key'] = tag['key'] + result_tag['value'] = tag['value'] + self.result['tags'].append(result_tag) + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + displaytext = dict(default=None), + state = dict(choices=['present', 'absent', 'active', 'suspended' ], default='present'), + domain = dict(default=None), + account = dict(default=None), + poll_async = dict(type='bool', choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_project = AnsibleCloudStackProject(module) + + state = module.params.get('state') + if state in ['absent']: + project = acs_project.absent_project() + + elif state in ['active', 'suspended']: + project = acs_project.state_project(state=state) + + else: + project = acs_project.present_project() + + result = acs_project.get_result(project) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 1bc3e10e77b565d5e231f25d8f0be205436fd6cd Mon Sep 17 00:00:00 2001 From: fdupoux Date: Sun, 31 May 2015 12:38:45 +0100 Subject: [PATCH 615/720] Devices in the current_devs list must also be converted to absolute device paths so comparison with dev_list works --- system/lvg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/lvg.py b/system/lvg.py index 955b94668dc..3c6c5ef2930 100644 --- a/system/lvg.py +++ b/system/lvg.py @@ -211,7 +211,7 @@ def main(): 
module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes"%(vg)) ### resize VG - current_devs = [ pv['name'] for pv in pvs if pv['vg_name'] == vg ] + current_devs = [ os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg ] devs_to_remove = list(set(current_devs) - set(dev_list)) devs_to_add = list(set(dev_list) - set(current_devs)) From a77de166c4621a39316ca35fe35c8fc6c2a28e0f Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Mon, 1 Jun 2015 08:59:50 -0400 Subject: [PATCH 616/720] Add new policy guidelines for Extras More to do here, but this is a start. --- CONTRIBUTING.md | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e441a4e3527..38b95840a77 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,28 +1,37 @@ -Welcome To Ansible GitHub -========================= +Contributing to ansible-modules-extras +====================================== -Hi! Nice to see you here! +The Ansible Extras Modules are written and maintained by the Ansible community, according to the following contribution guidelines. + +If you'd like to contribute code +================================ + +Please see [this web page](http://docs.ansible.com/community.html) for information about the contribution process. Important license agreement information is also included on that page. + +If you'd like to contribute code to an existing module +====================================================== +Each module in Extras is maintained by the owner of that module; each module's owner is indicated in the documentation section of the module itself. Any pull request for a module that is given a +1 by the owner in the comments will be merged by the Ansible team. + +If you'd like to contribute a new module +======================================== +Ansible welcomes new modules. Please be certain that you've read the [module development guide and standards](http://docs.ansible.com/developing_modules.html) thoroughly before submitting your module. + +Each new module requires two current module owners to approve a new module for inclusion. The Ansible community reviews new modules as often as possible, but please be patient; there are a lot of new module submissions in the pipeline, and it takes time to evaluate a new module for its adherence to module standards. + +Once your module is accepted, you become responsible for maintenance of that module, which means responding to pull requests and issues in a reasonably timely manner. If you'd like to ask a question =============================== Please see [this web page ](http://docs.ansible.com/community.html) for community information, which includes pointers on how to ask questions on the [mailing lists](http://docs.ansible.com/community.html#mailing-list-information) and IRC. -The github issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, and that page has the pointers to those. - -If you'd like to contribute code -================================ - -Please see [this web page](http://docs.ansible.com/community.html) for information about the contribution process. Important license agreement information is also included on that page. +The Github issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, and that page has the pointers to those. 
If you'd like to file a bug =========================== -I'd also read the community page above, but in particular, make sure you copy [this issue template](https://github.com/ansible/ansible/blob/devel/ISSUE_TEMPLATE.md) into your ticket description. We have a friendly neighborhood bot that will remind you if you forget :) This template helps us organize tickets faster and prevents asking some repeated questions, so it's very helpful to us and we appreciate your help with it. +Read the community page above, but in particular, make sure you copy [this issue template](https://github.com/ansible/ansible/blob/devel/ISSUE_TEMPLATE.md) into your ticket description. We have a friendly neighborhood bot that will remind you if you forget :) This template helps us organize tickets faster and prevents asking some repeated questions, so it's very helpful to us and we appreciate your help with it. Also please make sure you are testing on the latest released version of Ansible or the development branch. Thanks! - - - From 432477c14c0a34b01d6e8ee959b6bbf44156cfc2 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Mon, 1 Jun 2015 12:07:23 -0400 Subject: [PATCH 617/720] Revert "Added eval for pasting tag lists" --- monitoring/datadog_event.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index bde5cd80069..5319fcb0f1b 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -116,10 +116,7 @@ def post_event(module): if module.params['date_happened'] != None: body['date_happened'] = module.params['date_happened'] if module.params['tags'] != None: - if module.params['tags'].startswith("[") and module.params['tags'].endswith("]"): - body['tags'] = eval(module.params['tags']) - else: - body['tags'] = module.params['tags'].split(",") + body['tags'] = module.params['tags'].split(",") if module.params['aggregation_key'] != None: body['aggregation_key'] = module.params['aggregation_key'] if module.params['source_type_name'] != None: From 04e43a9dcb52d590c3d7e4a18170bea4b315c2a6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 1 Jun 2015 12:31:20 -0400 Subject: [PATCH 618/720] added version added --- monitoring/nagios.py | 1 + 1 file changed, 1 insertion(+) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 5fd51d17123..38a1f8c161a 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -53,6 +53,7 @@ options: required: false default: Ansible comment: + version_added: "2.0" description: - Comment for C(downtime) action. required: false From d5c581e9ebcf1d2c25baddfb9f845c5357677b21 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 1 Jun 2015 12:36:49 -0400 Subject: [PATCH 619/720] updated docs for 2.0 --- monitoring/nagios.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 38a1f8c161a..543f094b70e 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -30,6 +30,7 @@ options: action: description: - Action to take. + - servicegroup options were added in 2.0. required: true default: null choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", @@ -73,6 +74,7 @@ options: required: true default: null servicegroup: + version_added: "2.0" description: - the Servicegroup we want to set downtimes/alerts for. B(Required) option when using the C(servicegroup_service_downtime) amd C(servicegroup_host_downtime). 
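The datadog_event revert above removes the eval() call that was used to parse bracketed tag strings; eval() on user-supplied input can execute arbitrary code, so the module goes back to a plain comma split (a later commit in this series switches the parameter to type='list' so Ansible handles the conversion). A safe sketch of the split-based parsing, with a hypothetical tag string:

def parse_tags(raw):
    # Comma-separated string -> list, without eval(); sketch of the reverted behaviour.
    if raw is None:
        return None
    return [tag.strip() for tag in raw.split(',') if tag.strip()]

print(parse_tags("env:prod, role:db"))  # ['env:prod', 'role:db']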
From 01551a8c15129a48100e1e3707e285ead061a2a9 Mon Sep 17 00:00:00 2001 From: David Wittman Date: Tue, 21 Oct 2014 16:56:13 -0500 Subject: [PATCH 620/720] [lvol] Add opts parameter Adds the ability to set options to be passed to the lvcreate command using the `opts` parameter. --- system/lvol.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index c49cb369440..d807f9e8336 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -57,6 +57,10 @@ options: - Shrink or remove operations of volumes requires this switch. Ensures that that filesystems get never corrupted/destroyed by mistake. required: false + opts: + version_added: "1.9" + description: + - Free-form options to be passed to the lvcreate command notes: - Filesystems on top of the volume are not resized. ''' @@ -71,6 +75,9 @@ EXAMPLES = ''' # Create a logical volume the size of all remaining space in the volume group - lvol: vg=firefly lv=test size=100%FREE +# Create a logical volume with special options +- lvol: vg=firefly lv=test size=512g opts="-r 16" + # Extend the logical volume to 1024m. - lvol: vg=firefly lv=test size=1024 @@ -116,6 +123,7 @@ def main(): vg=dict(required=True), lv=dict(required=True), size=dict(), + opts=dict(type='str'), state=dict(choices=["absent", "present"], default='present'), force=dict(type='bool', default='no'), ), @@ -135,11 +143,15 @@ def main(): vg = module.params['vg'] lv = module.params['lv'] size = module.params['size'] + opts = module.params['opts'] state = module.params['state'] force = module.boolean(module.params['force']) size_opt = 'L' size_unit = 'm' + if opts is None: + opts = "" + if size: # LVCREATE(8) -l --extents option with percentage if '%' in size: @@ -212,7 +224,8 @@ def main(): changed = True else: lvcreate_cmd = module.get_bin_path("lvcreate", required=True) - rc, _, err = module.run_command("%s %s -n %s -%s %s%s %s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, vg)) + cmd = "%s %s -n %s -%s %s%s %s %s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg) + rc, _, err = module.run_command(cmd) if rc == 0: changed = True else: From eec8dca006ea0f14449de496821ea320ad74c4bc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 1 Jun 2015 15:27:55 -0400 Subject: [PATCH 621/720] added copyright/license info to modules I had missed --- notification/jabber.py | 18 ++++++++++++++++++ system/svc.py | 17 +++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/notification/jabber.py b/notification/jabber.py index 466c72d1570..1a19140a83d 100644 --- a/notification/jabber.py +++ b/notification/jabber.py @@ -1,5 +1,23 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# +# (c) 2015, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. 
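The new opts parameter above is appended verbatim to the lvcreate invocation, which lets a playbook pass flags the module does not model (the EXAMPLES section shows opts="-r 16"). A sketch of how the final command string is assembled, with hypothetical values:

def build_lvcreate_cmd(lvcreate_bin, yesopt, lv, size_opt, size, size_unit, opts, vg):
    # Mirrors the command string built in system/lvol.py; sketch only.
    if opts is None:
        opts = ""
    return "%s %s -n %s -%s %s%s %s %s" % (lvcreate_bin, yesopt, lv, size_opt, size, size_unit, opts, vg)

# Hypothetical values: a 512g volume named 'test' in volume group 'firefly'.
print(build_lvcreate_cmd("/sbin/lvcreate", "", "test", "L", 512, "g", "-r 16", "firefly"))
# /sbin/lvcreate  -n test -L 512g -r 16 firefly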
If not, see + DOCUMENTATION = ''' --- diff --git a/system/svc.py b/system/svc.py index 0227a69ecd8..9831ce42ea7 100644 --- a/system/svc.py +++ b/system/svc.py @@ -1,5 +1,22 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# +# (c) 2015, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see DOCUMENTATION = ''' --- From 37db61923420e9be68e1baba7ebda5a550dcd61e Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Mon, 1 Jun 2015 15:15:37 -0500 Subject: [PATCH 622/720] lxc_container: remove BabyJSON Removed the usage of baby json. This is in response to the fact that the baby json functionality was removed in Ansible 1.8 Ref: https://github.com/ansible/ansible-modules-extras/issues/430 --- cloud/lxc/lxc_container.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 119d45069c3..b2dba2111e4 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -383,9 +383,7 @@ EXAMPLES = """ try: import lxc except ImportError: - msg = 'The lxc module is not importable. Check the requirements.' - print("failed=True msg='%s'" % msg) - raise SystemExit(msg) + HAS_LXC = False # LXC_COMPRESSION_MAP is a map of available compression types when creating @@ -1706,6 +1704,11 @@ def main(): supports_check_mode=False, ) + if not HAS_LXC: + module.fail_json( + msg='The `lxc` module is not importable. Check the requirements.' + ) + lv_name = module.params.get('lv_name') if not lv_name: module.params['lv_name'] = module.params.get('name') From 391df0ffe0f7fbf622b0e9261a8fb32e2849a67a Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Mon, 1 Jun 2015 15:31:56 -0500 Subject: [PATCH 623/720] Updates the doc information for the python2-lxc dep The python2-lxc library has been uploaded to pypi as such this commit updates the requirements and doc information for the module such that it instructs the user to install the pip package "lxc-python2" while also noting that the package could be gotten from source as well. In the update comments have been added to the requirements list which notes where the package should come from, Closes-Bug: https://github.com/ansible/ansible-modules-extras/issues/550 --- cloud/lxc/lxc_container.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index b2dba2111e4..18555e2e351 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -173,9 +173,9 @@ options: - list of 'key=value' options to use when configuring a container. required: false requirements: - - 'lxc >= 1.0' - - 'python >= 2.6' - - 'python2-lxc >= 0.1' + - 'lxc >= 1.0 # OS package' + - 'python >= 2.6 # OS Package' + - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc' notes: - Containers must have a unique name. 
If you attempt to create a container with a name that already exists in the users namespace the module will @@ -195,7 +195,8 @@ notes: creating the archive. - If your distro does not have a package for "python2-lxc", which is a requirement for this module, it can be installed from source at - "https://github.com/lxc/python2-lxc" + "https://github.com/lxc/python2-lxc" or installed via pip using the package + name lxc-python2. """ EXAMPLES = """ From 927d490f7644f0bbda3cd167f66c2d74ccb68c5f Mon Sep 17 00:00:00 2001 From: Q Date: Tue, 2 Jun 2015 13:32:22 +1000 Subject: [PATCH 624/720] patch module: 'backup_copy' parameter renamed to 'backup' --- files/patch.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/files/patch.py b/files/patch.py index 0932ed3556a..085784e7de5 100644 --- a/files/patch.py +++ b/files/patch.py @@ -65,7 +65,7 @@ options: required: false type: "int" default: "0" - backup_copy: + backup: description: - passes --backup --version-control=numbered to patch, producing numbered backup copies @@ -133,8 +133,9 @@ def main(): 'basedir': {}, 'strip': {'default': 0, 'type': 'int'}, 'remote_src': {'default': False, 'type': 'bool'}, - # don't call it "backup" since the semantics differs from the default one - 'backup_copy': { 'default': False, 'type': 'bool' } + # NB: for 'backup' parameter, semantics is slightly different from standard + # since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~") + 'backup': { 'default': False, 'type': 'bool' } }, required_one_of=[['dest', 'basedir']], supports_check_mode=True @@ -168,7 +169,7 @@ def main(): if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip): try: apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip, - dry_run=module.check_mode, backup=p.backup_copy ) + dry_run=module.check_mode, backup=p.backup ) changed = True except PatchError, e: module.fail_json(msg=str(e)) From 759a7d84dccb60be4b9a2134e5779d3f834f65f7 Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Tue, 2 Jun 2015 09:25:55 +0100 Subject: [PATCH 625/720] Add GPL notices --- cloud/webfaction/webfaction_app.py | 21 ++++++++++++++++++++- cloud/webfaction/webfaction_db.py | 23 +++++++++++++++++++++-- cloud/webfaction/webfaction_domain.py | 21 ++++++++++++++++++++- cloud/webfaction/webfaction_mailbox.py | 20 +++++++++++++++++++- cloud/webfaction/webfaction_site.py | 21 ++++++++++++++++++++- 5 files changed, 100 insertions(+), 6 deletions(-) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index 20e94a7b5f6..55599bdcca6 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -1,10 +1,29 @@ #! /usr/bin/python +# # Create a Webfaction application using Ansible and the Webfaction API # # Valid application types can be found by looking here: # http://docs.webfaction.com/xmlrpc-api/apps.html#application-types # -# Quentin Stafford-Fraser 2015 +# ------------------------------------------ +# +# (c) Quentin Stafford-Fraser 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
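The lxc_container changes above replace the old print-and-exit behaviour at import time with the usual optional-dependency pattern: remember whether the library imported, then call fail_json() from main() once the module object exists. A generic sketch of that pattern, with a hypothetical library name:

# Optional dependency handling as used by these modules (sketch).
try:
    import some_client_library          # hypothetical third-party dependency
    HAS_LIB = True
except ImportError:
    HAS_LIB = False

def ensure_dependency(module):
    # 'module' is expected to be an AnsibleModule instance; failing here produces
    # proper JSON output instead of raw text printed at import time.
    if not HAS_LIB:
        module.fail_json(msg='The some_client_library module is not importable. Check the requirements.')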
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# DOCUMENTATION = ''' --- diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index 784477c5409..a9ef88b943e 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -1,7 +1,26 @@ #! /usr/bin/python -# Create webfaction database using Ansible and the Webfaction API # -# Quentin Stafford-Fraser 2015 +# Create a webfaction database using Ansible and the Webfaction API +# +# ------------------------------------------ +# +# (c) Quentin Stafford-Fraser 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# DOCUMENTATION = ''' --- diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index c99a0f23f6d..f2c95897bc5 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -1,7 +1,26 @@ #! /usr/bin/python +# # Create Webfaction domains and subdomains using Ansible and the Webfaction API # -# Quentin Stafford-Fraser 2015 +# ------------------------------------------ +# +# (c) Quentin Stafford-Fraser 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# DOCUMENTATION = ''' --- diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index 87ca1fd1a26..976a428f3d3 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -1,7 +1,25 @@ #! /usr/bin/python +# # Create webfaction mailbox using Ansible and the Webfaction API # -# Quentin Stafford-Fraser and Andy Baker 2015 +# ------------------------------------------ +# (c) Quentin Stafford-Fraser and Andy Baker 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# DOCUMENTATION = ''' --- diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py index a5be4f5407b..223458faf46 100644 --- a/cloud/webfaction/webfaction_site.py +++ b/cloud/webfaction/webfaction_site.py @@ -1,7 +1,26 @@ #! /usr/bin/python +# # Create Webfaction website using Ansible and the Webfaction API # -# Quentin Stafford-Fraser 2015 +# ------------------------------------------ +# +# (c) Quentin Stafford-Fraser 2015 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# DOCUMENTATION = ''' --- From 078dc8b205357115c60910e062f5a82fa5e50cfa Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Tue, 2 Jun 2015 16:26:32 +0600 Subject: [PATCH 626/720] Added proxmox_template module --- cloud/misc/proxmox_template.py | 245 +++++++++++++++++++++++++++++++++ 1 file changed, 245 insertions(+) create mode 100644 cloud/misc/proxmox_template.py diff --git a/cloud/misc/proxmox_template.py b/cloud/misc/proxmox_template.py new file mode 100644 index 00000000000..d07a406122c --- /dev/null +++ b/cloud/misc/proxmox_template.py @@ -0,0 +1,245 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: proxmox_template +short_description: management of OS templates in Proxmox VE cluster +description: + - allows you to list/upload/delete templates in Proxmox VE cluster +version_added: "2.0" +options: + api_host: + description: + - the host of the Proxmox VE cluster + required: true + api_user: + description: + - the user to authenticate with + required: true + api_password: + description: + - the password to authenticate with + - you can use PROXMOX_PASSWORD environment variable + default: null + required: false + https_verify_ssl: + description: + - enable / disable https certificate verification + default: false + required: false + type: boolean + node: + description: + - Proxmox VE node, when you will operate with template + default: null + required: true + src: + description: + - path to uploaded file + - required only for C(state=present) + default: null + required: false + aliases: ['path'] + template: + description: + - the template name + - required only for states C(absent), C(info) + default: null + required: false + content_type: + description: + - content type + - required only for C(state=present) + default: 'vztmpl' + required: false + choices: ['vztmpl', 'iso'] + storage: + description: + - target storage + default: 'local' + required: false + type: string + timeout: + description: + - timeout for operations + default: 300 + required: false + type: integer + force: + description: + - can be used only with C(state=present), exists template will be overwritten + default: false + required: false + type: boolean + state: + description: + - Indicate desired state of the template + choices: ['present', 'absent', 'list'] + default: present +notes: + - Requires proxmoxer and requests modules on host. This modules can be installed with pip. 
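As the api_password note above says, the module falls back to the PROXMOX_PASSWORD environment variable when the parameter is not given, and the credentials are then handed to proxmoxer. A minimal sketch of that lookup and connection, mirroring the ProxmoxAPI call used later in this module (the host and user values are made up):

import os

from proxmoxer import ProxmoxAPI   # provided by 'pip install proxmoxer requests'

def connect(api_host, api_user, api_password=None, validate_certs=False):
    # Resolve the password (explicit parameter first, then the environment) and connect.
    if not api_password:
        api_password = os.environ.get('PROXMOX_PASSWORD')
    if not api_password:
        raise ValueError('set api_password or the PROXMOX_PASSWORD environment variable')
    return ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)

# proxmox = connect('node1.example.com', 'root@pam')                      # hypothetical host/user
# print(proxmox.nodes('uk-mc02').storage('local').content.get())          # list storage content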
+requirements: [ "proxmoxer", "requests" ] +author: "Sergei Antipov @UnderGreen" +''' + +EXAMPLES = ''' +# Upload new openvz template with minimal options +- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz' + +# Upload new openvz template with minimal options use environment PROXMOX_PASSWORD variable(you should export it before) +- proxmox_template: node='uk-mc02' api_user='root@pam' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz' + +# Upload new openvz template with all options and force overwrite +- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' storage='local' content_type='vztmpl' src='~/ubuntu-14.04-x86_64.tar.gz' force=yes + +# Delete template with minimal options +- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' template='ubuntu-14.04-x86_64.tar.gz' state=absent + +# List content of storage(it returns list of dicts) +- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' storage='local' state=list +''' + +import os +import time + +try: + from proxmoxer import ProxmoxAPI + HAS_PROXMOXER = True +except ImportError: + HAS_PROXMOXER = False + +def get_template(proxmox, node, storage, content_type, template): + return [ True for tmpl in proxmox.nodes(node).storage(storage).content.get() + if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template) ] + +def get_content(proxmox, node, storage): + return proxmox.nodes(node).storage(storage).content.get() + +def upload_template(module, proxmox, node, storage, content_type, realpath, timeout): + taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath)) + while timeout: + task_status = proxmox.nodes(node).tasks(taskid).status.get() + if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK': + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for uploading template. 
Last line in task before timeout: %s' + % proxmox.node(node).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + +def delete_template(module, proxmox, node, storage, content_type, template, timeout): + volid = '%s:%s/%s' % (storage, content_type, template) + proxmox.nodes(node).storage(storage).content.delete(volid) + while timeout: + if not get_template(proxmox, node, storage, content_type, template): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for deleting template.') + + time.sleep(1) + return False + +def main(): + module = AnsibleModule( + argument_spec = dict( + api_host = dict(required=True), + api_user = dict(required=True), + api_password = dict(no_log=True), + https_verify_ssl = dict(type='bool', choices=BOOLEANS, default='no'), + node = dict(), + src = dict(), + template = dict(), + content_type = dict(default='vztmpl', choices=['vztmpl','iso']), + storage = dict(default='local'), + timeout = dict(type='int', default=300), + force = dict(type='bool', choices=BOOLEANS, default='no'), + state = dict(default='present', choices=['present', 'absent', 'list']), + ) + ) + + if not HAS_PROXMOXER: + module.fail_json(msg='proxmoxer required for this module') + + state = module.params['state'] + api_user = module.params['api_user'] + api_host = module.params['api_host'] + api_password = module.params['api_password'] + https_verify_ssl = module.params['https_verify_ssl'] + node = module.params['node'] + storage = module.params['storage'] + timeout = module.params['timeout'] + + # If password not set get it from PROXMOX_PASSWORD env + if not api_password: + try: + api_password = os.environ['PROXMOX_PASSWORD'] + except KeyError, e: + module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') + + try: + proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=https_verify_ssl) + except Exception, e: + module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) + + if state == 'present': + try: + content_type = module.params['content_type'] + src = module.params['src'] + + from ansible import utils + realpath = utils.path_dwim(None, src) + template = os.path.basename(realpath) + if get_template(proxmox, node, storage, content_type, template) and not module.params['force']: + module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template)) + elif not src: + module.fail_json(msg='src param to uploading template file is mandatory') + elif not (os.path.exists(realpath) and os.path.isfile(realpath)): + module.fail_json(msg='template file on path %s not exists' % realpath) + + if upload_template(module, proxmox, node, storage, content_type, realpath, timeout): + module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) + except Exception, e: + module.fail_json(msg="uploading of template %s failed with exception: %s" % ( template, e )) + + elif state == 'absent': + try: + content_type = module.params['content_type'] + template = module.params['template'] + + if not template: + module.fail_json(msg='template param is mandatory') + elif not get_template(proxmox, node, storage, content_type, template): + module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template)) + + if delete_template(module, proxmox, node, storage, content_type, template, timeout): + 
module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template)) + except Exception, e: + module.fail_json(msg="deleting of template %s failed with exception: %s" % ( template, e )) + + elif state == 'list': + try: + + module.exit_json(changed=False, templates=get_content(proxmox, node, storage)) + except Exception, e: + module.fail_json(msg="listing of templates %s failed with exception: %s" % ( template, e )) + +# import module snippets +from ansible.module_utils.basic import * +main() From 08a9096c7512a4f00db3a3a6d9b18e4de2b35cac Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Tue, 2 Jun 2015 18:21:36 +0600 Subject: [PATCH 627/720] proxmox_template | fixed problem with uploading --- cloud/misc/proxmox_template.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/misc/proxmox_template.py b/cloud/misc/proxmox_template.py index d07a406122c..b1d94d96234 100644 --- a/cloud/misc/proxmox_template.py +++ b/cloud/misc/proxmox_template.py @@ -129,10 +129,10 @@ def get_template(proxmox, node, storage, content_type, template): def get_content(proxmox, node, storage): return proxmox.nodes(node).storage(storage).content.get() -def upload_template(module, proxmox, node, storage, content_type, realpath, timeout): +def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout): taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath)) while timeout: - task_status = proxmox.nodes(node).tasks(taskid).status.get() + task_status = proxmox.nodes(api_host.split('.')[0]).tasks(taskid).status.get() if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK': return True timeout = timeout - 1 @@ -213,7 +213,7 @@ def main(): elif not (os.path.exists(realpath) and os.path.isfile(realpath)): module.fail_json(msg='template file on path %s not exists' % realpath) - if upload_template(module, proxmox, node, storage, content_type, realpath, timeout): + if upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout): module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) except Exception, e: module.fail_json(msg="uploading of template %s failed with exception: %s" % ( template, e )) From 6050cc8e5d41363b62be3f369afe18584ac1aea6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 08:37:45 -0400 Subject: [PATCH 628/720] push list nature of tags into spec to allow both for comma delimited strings and actual lists --- monitoring/datadog_event.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index 5319fcb0f1b..d363f8b17dc 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -86,7 +86,7 @@ def main(): priority=dict( required=False, default='normal', choices=['normal', 'low'] ), - tags=dict(required=False, default=None), + tags=dict(required=False, default=None, type='list'), alert_type=dict( required=False, default='info', choices=['error', 'warning', 'info', 'success'] @@ -116,7 +116,7 @@ def post_event(module): if module.params['date_happened'] != None: body['date_happened'] = module.params['date_happened'] if module.params['tags'] != None: - body['tags'] = module.params['tags'].split(",") + body['tags'] = module.params['tags'] if module.params['aggregation_key'] != None: body['aggregation_key'] = module.params['aggregation_key'] if 
module.params['source_type_name'] != None: From a38b8205d207fcc1d69a00f7c77d28239f185cc6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 08:48:20 -0400 Subject: [PATCH 629/720] added version added to patch's bacukp --- files/patch.py | 1 + 1 file changed, 1 insertion(+) diff --git a/files/patch.py b/files/patch.py index 085784e7de5..c1a61ce733f 100644 --- a/files/patch.py +++ b/files/patch.py @@ -66,6 +66,7 @@ options: type: "int" default: "0" backup: + version_added: "2.0" description: - passes --backup --version-control=numbered to patch, producing numbered backup copies From e1c8cdc39d87549183cf880f4df44d0ee87e11de Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Tue, 2 Jun 2015 22:26:32 +0600 Subject: [PATCH 630/720] proxmox_template | changed http_verify_ssl to validate_certs --- cloud/misc/proxmox_template.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/misc/proxmox_template.py b/cloud/misc/proxmox_template.py index b1d94d96234..4bf71f62b12 100644 --- a/cloud/misc/proxmox_template.py +++ b/cloud/misc/proxmox_template.py @@ -36,7 +36,7 @@ options: - you can use PROXMOX_PASSWORD environment variable default: null required: false - https_verify_ssl: + validate_certs: description: - enable / disable https certificate verification default: false @@ -162,7 +162,7 @@ def main(): api_host = dict(required=True), api_user = dict(required=True), api_password = dict(no_log=True), - https_verify_ssl = dict(type='bool', choices=BOOLEANS, default='no'), + validate_certs = dict(type='bool', choices=BOOLEANS, default='no'), node = dict(), src = dict(), template = dict(), @@ -181,7 +181,7 @@ def main(): api_user = module.params['api_user'] api_host = module.params['api_host'] api_password = module.params['api_password'] - https_verify_ssl = module.params['https_verify_ssl'] + validate_certs = module.params['validate_certs'] node = module.params['node'] storage = module.params['storage'] timeout = module.params['timeout'] @@ -194,7 +194,7 @@ def main(): module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') try: - proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=https_verify_ssl) + proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) except Exception, e: module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) From 5f916ac4e36320a9dd31dbe7226e34479a0663af Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Tue, 2 Jun 2015 22:29:19 +0600 Subject: [PATCH 631/720] proxmox_template | deleted state=list and changed default timeout to 30 --- cloud/misc/proxmox_template.py | 23 +++++------------------ 1 file changed, 5 insertions(+), 18 deletions(-) diff --git a/cloud/misc/proxmox_template.py b/cloud/misc/proxmox_template.py index 4bf71f62b12..7fed47f7260 100644 --- a/cloud/misc/proxmox_template.py +++ b/cloud/misc/proxmox_template.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' module: proxmox_template short_description: management of OS templates in Proxmox VE cluster description: - - allows you to list/upload/delete templates in Proxmox VE cluster + - allows you to upload/delete templates in Proxmox VE cluster version_added: "2.0" options: api_host: @@ -76,7 +76,7 @@ options: timeout: description: - timeout for operations - default: 300 + default: 30 required: false type: integer force: @@ -88,7 +88,7 @@ options: state: description: - Indicate desired state of the template - choices: ['present', 'absent', 
'list'] + choices: ['present', 'absent'] default: present notes: - Requires proxmoxer and requests modules on host. This modules can be installed with pip. @@ -108,9 +108,6 @@ EXAMPLES = ''' # Delete template with minimal options - proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' template='ubuntu-14.04-x86_64.tar.gz' state=absent - -# List content of storage(it returns list of dicts) -- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' storage='local' state=list ''' import os @@ -126,9 +123,6 @@ def get_template(proxmox, node, storage, content_type, template): return [ True for tmpl in proxmox.nodes(node).storage(storage).content.get() if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template) ] -def get_content(proxmox, node, storage): - return proxmox.nodes(node).storage(storage).content.get() - def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout): taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath)) while timeout: @@ -168,9 +162,9 @@ def main(): template = dict(), content_type = dict(default='vztmpl', choices=['vztmpl','iso']), storage = dict(default='local'), - timeout = dict(type='int', default=300), + timeout = dict(type='int', default=30), force = dict(type='bool', choices=BOOLEANS, default='no'), - state = dict(default='present', choices=['present', 'absent', 'list']), + state = dict(default='present', choices=['present', 'absent']), ) ) @@ -233,13 +227,6 @@ def main(): except Exception, e: module.fail_json(msg="deleting of template %s failed with exception: %s" % ( template, e )) - elif state == 'list': - try: - - module.exit_json(changed=False, templates=get_content(proxmox, node, storage)) - except Exception, e: - module.fail_json(msg="listing of templates %s failed with exception: %s" % ( template, e )) - # import module snippets from ansible.module_utils.basic import * main() From 35853a3d70310e30a80fe968f7274dc736a61dc9 Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Tue, 2 Jun 2015 22:53:47 +0600 Subject: [PATCH 632/720] proxmox | changed https_verify_ssl to to validate_certs and added forgotten return --- cloud/misc/proxmox.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloud/misc/proxmox.py b/cloud/misc/proxmox.py index f3ee1962891..7be4361edbe 100644 --- a/cloud/misc/proxmox.py +++ b/cloud/misc/proxmox.py @@ -41,7 +41,7 @@ options: - the instance id default: null required: true - https_verify_ssl: + validate_certs: description: - enable / disable https certificate verification default: false @@ -219,6 +219,7 @@ def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, sw % proxmox_node.tasks(taskid).log.get()[:1]) time.sleep(1) + return False def start_instance(module, proxmox, vm, vmid, timeout): taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.start.post() @@ -272,7 +273,7 @@ def main(): api_user = dict(required=True), api_password = dict(no_log=True), vmid = dict(required=True), - https_verify_ssl = dict(type='bool', choices=BOOLEANS, default='no'), + validate_certs = dict(type='bool', choices=BOOLEANS, default='no'), node = dict(), password = dict(no_log=True), hostname = dict(), @@ -302,7 +303,7 @@ def main(): api_host = module.params['api_host'] api_password = module.params['api_password'] vmid = module.params['vmid'] - https_verify_ssl = module.params['https_verify_ssl'] + validate_certs = 
module.params['validate_certs'] node = module.params['node'] disk = module.params['disk'] cpus = module.params['cpus'] @@ -319,7 +320,7 @@ def main(): module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') try: - proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=https_verify_ssl) + proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) except Exception, e: module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) From 861b4d0c19809db8c954eaeecfe98609aac9a068 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 14:11:51 -0400 Subject: [PATCH 633/720] corrected lvol docs version to 2.0 --- system/lvol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index d807f9e8336..3225408d162 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -58,7 +58,7 @@ options: that filesystems get never corrupted/destroyed by mistake. required: false opts: - version_added: "1.9" + version_added: "2.0" description: - Free-form options to be passed to the lvcreate command notes: From 4475676866cdfec3704acf445e46a694d1519433 Mon Sep 17 00:00:00 2001 From: Roman Vyakhirev Date: Wed, 3 Jun 2015 01:57:15 +0300 Subject: [PATCH 634/720] composer module. ignore_platform_reqs option added. --- packaging/language/composer.py | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/packaging/language/composer.py b/packaging/language/composer.py index 5bbd948595a..cfe3f99b9e7 100644 --- a/packaging/language/composer.py +++ b/packaging/language/composer.py @@ -82,6 +82,14 @@ options: default: "yes" choices: [ "yes", "no" ] aliases: [ "optimize-autoloader" ] + ignore_platform_reqs: + version_added: "2.0" + description: + - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these. 
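Like the other booleans in this module, ignore_platform_reqs maps one-to-one onto a composer command line flag (--ignore-platform-reqs). A small sketch of that mapping for a subset of the options:

def composer_options(params):
    # Translate boolean module parameters into composer CLI flags (sketch).
    flag_map = {
        'no_dev': '--no-dev',
        'prefer_dist': '--prefer-dist',
        'optimize_autoloader': '--optimize-autoloader',
        'ignore_platform_reqs': '--ignore-platform-reqs',
    }
    return [flag for name, flag in sorted(flag_map.items()) if params.get(name)]

print(composer_options({'no_dev': True, 'ignore_platform_reqs': True}))
# ['--ignore-platform-reqs', '--no-dev']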
+ required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [ "ignore-platform-reqs" ] requirements: - php - composer installed in bin path (recommended /usr/local/bin) @@ -116,14 +124,15 @@ def composer_install(module, command, options): def main(): module = AnsibleModule( argument_spec = dict( - command = dict(default="install", type="str", required=False), - working_dir = dict(aliases=["working-dir"], required=True), - prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]), - prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]), - no_dev = dict(default="yes", type="bool", aliases=["no-dev"]), - no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]), - no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]), - optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]), + command = dict(default="install", type="str", required=False), + working_dir = dict(aliases=["working-dir"], required=True), + prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]), + prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]), + no_dev = dict(default="yes", type="bool", aliases=["no-dev"]), + no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]), + no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]), + optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]), + ignore_platform_reqs = dict(default="no", type="bool", aliases=["ignore-platform-reqs"]), ), supports_check_mode=True ) @@ -153,6 +162,8 @@ def main(): options.append('--no-plugins') if module.params['optimize_autoloader']: options.append('--optimize-autoloader') + if module.params['ignore_platform_reqs']: + options.append('--ignore-platform-reqs') if module.check_mode: options.append('--dry-run') From 1c6ae9333cd9c3b73315407e069bc49ff70e03cd Mon Sep 17 00:00:00 2001 From: Etienne CARRIERE Date: Wed, 3 Jun 2015 08:22:18 +0200 Subject: [PATCH 635/720] Factor common functions for F5 modules --- network/f5/bigip_monitor_http.py | 61 ++++++------------------------ network/f5/bigip_monitor_tcp.py | 64 +++++++------------------------- network/f5/bigip_node.py | 52 +++++--------------------- network/f5/bigip_pool.py | 56 ++++++---------------------- network/f5/bigip_pool_member.py | 54 ++++++--------------------- 5 files changed, 58 insertions(+), 229 deletions(-) diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py index 6a31afb2ee7..5299bdb0f44 100644 --- a/network/f5/bigip_monitor_http.py +++ b/network/f5/bigip_monitor_http.py @@ -163,35 +163,10 @@ EXAMPLES = ''' name: "{{ monitorname }}" ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - TEMPLATE_TYPE = 'TTYPE_HTTP' DEFAULT_PARENT_TYPE = 'http' -# =========================================== -# bigip_monitor module generic methods. -# these should be re-useable for other monitor types -# - -def bigip_api(bigip, user, password): - - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - - -def disable_ssl_cert_validation(): - - # You probably only want to do this for testing and never in production. 
- # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context - def check_monitor_exists(module, api, monitor, parent): @@ -278,7 +253,6 @@ def set_integer_property(api, monitor, int_property): def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): - changed = False for str_property in template_string_properties: if str_property['value'] is not None and not check_string_property(api, monitor, str_property): @@ -321,15 +295,8 @@ def set_ipport(api, monitor, ipport): def main(): # begin monitor specific stuff - - module = AnsibleModule( - argument_spec = dict( - server = dict(required=True), - user = dict(required=True), - password = dict(required=True), - validate_certs = dict(default='yes', type='bool'), - partition = dict(default='Common'), - state = dict(default='present', choices=['present', 'absent']), + argument_spec=f5_argument_spec(); + argument_spec.update( dict( name = dict(required=True), parent = dict(default=DEFAULT_PARENT_TYPE), parent_partition = dict(default='Common'), @@ -341,20 +308,20 @@ def main(): interval = dict(required=False, type='int'), timeout = dict(required=False, type='int'), time_until_up = dict(required=False, type='int', default=0) - ), + ) + ) + + module = AnsibleModule( + argument_spec = argument_spec, supports_check_mode=True ) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - partition = module.params['partition'] + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) + parent_partition = module.params['parent_partition'] - state = module.params['state'] name = module.params['name'] - parent = "/%s/%s" % (parent_partition, module.params['parent']) - monitor = "/%s/%s" % (partition, name) + parent = fq_name(parent_partition, module.params['parent']) + monitor = fq_name(partition, name) send = module.params['send'] receive = module.params['receive'] receive_disable = module.params['receive_disable'] @@ -366,11 +333,6 @@ def main(): # end monitor specific stuff - if not validate_certs: - disable_ssl_cert_validation() - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") api = bigip_api(server, user, password) monitor_exists = check_monitor_exists(module, api, monitor, parent) @@ -481,5 +443,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py index d5855e0f15d..b5f58da8397 100644 --- a/network/f5/bigip_monitor_tcp.py +++ b/network/f5/bigip_monitor_tcp.py @@ -181,37 +181,11 @@ EXAMPLES = ''' ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP' TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open'] DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower() -# =========================================== -# bigip_monitor module generic methods. -# these should be re-useable for other monitor types -# - -def bigip_api(bigip, user, password): - - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - - -def disable_ssl_cert_validation(): - - # You probably only want to do this for testing and never in production. 
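The refactor above and in the following F5 modules replaces per-module boilerplate with shared helpers imported from ansible.module_utils.f5 (f5_argument_spec, f5_parse_arguments, bigip_api, fq_name). Their implementation is not part of this diff; the sketch below is only an assumption of what fq_name and the common argument spec roughly provide, inferred from how they are called above:

# Assumed behaviour of the shared helpers; the real code lives in
# ansible.module_utils.f5 and may differ in detail.
def fq_name(partition, name):
    # Prefix a name with its partition unless it already looks fully qualified.
    if name is not None and not name.startswith('/'):
        return '/%s/%s' % (partition, name)
    return name

def f5_argument_spec():
    # Connection parameters shared by the F5 modules (assumed shape, matching the
    # per-module definitions that the refactor removes).
    return dict(
        server=dict(type='str', required=True),
        user=dict(type='str', required=True),
        password=dict(type='str', required=True),
        validate_certs=dict(type='bool', default='yes'),
        partition=dict(type='str', default='Common'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )

print(fq_name('Common', 'web_pool'))   # /Common/web_pool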
- # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context - - def check_monitor_exists(module, api, monitor, parent): # hack to determine if monitor exists @@ -234,7 +208,7 @@ def check_monitor_exists(module, api, monitor, parent): def create_monitor(api, monitor, template_attributes): - try: + try: api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes]) except bigsuds.OperationFailed, e: if "already exists" in str(e): @@ -298,7 +272,6 @@ def set_integer_property(api, monitor, int_property): def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): - changed = False for str_property in template_string_properties: if str_property['value'] is not None and not check_string_property(api, monitor, str_property): @@ -341,15 +314,8 @@ def set_ipport(api, monitor, ipport): def main(): # begin monitor specific stuff - - module = AnsibleModule( - argument_spec = dict( - server = dict(required=True), - user = dict(required=True), - password = dict(required=True), - validate_certs = dict(default='yes', type='bool'), - partition = dict(default='Common'), - state = dict(default='present', choices=['present', 'absent']), + argument_spec=f5_argument_spec(); + argument_spec.update(dict( name = dict(required=True), type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES), parent = dict(default=DEFAULT_PARENT), @@ -361,21 +327,21 @@ def main(): interval = dict(required=False, type='int'), timeout = dict(required=False, type='int'), time_until_up = dict(required=False, type='int', default=0) - ), + ) + ) + + module = AnsibleModule( + argument_spec = argument_spec, supports_check_mode=True ) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - partition = module.params['partition'] + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) + parent_partition = module.params['parent_partition'] - state = module.params['state'] name = module.params['name'] type = 'TTYPE_' + module.params['type'].upper() - parent = "/%s/%s" % (parent_partition, module.params['parent']) - monitor = "/%s/%s" % (partition, name) + parent = fq_name(parent_partition, module.params['parent']) + monitor = fq_name(partition, name) send = module.params['send'] receive = module.params['receive'] ip = module.params['ip'] @@ -390,11 +356,6 @@ def main(): # end monitor specific stuff - if not validate_certs: - disable_ssl_cert_validation() - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") api = bigip_api(server, user, password) monitor_exists = check_monitor_exists(module, api, monitor, parent) @@ -506,5 +467,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index 31e34fdeb47..49f721aa8c5 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -188,27 +188,6 @@ EXAMPLES = ''' ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# ========================== -# bigip_node module specific -# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - -def 
disable_ssl_cert_validation(): - # You probably only want to do this for testing and never in production. - # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context - def node_exists(api, address): # hack to determine if node exists result = False @@ -283,42 +262,30 @@ def get_node_monitor_status(api, name): def main(): - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - validate_certs = dict(default='yes', type='bool'), - state = dict(type='str', default='present', choices=['present', 'absent']), + argument_spec=f5_argument_spec(); + argument_spec.update(dict( session_state = dict(type='str', choices=['enabled', 'disabled']), monitor_state = dict(type='str', choices=['enabled', 'disabled']), - partition = dict(type='str', default='Common'), name = dict(type='str', required=True), host = dict(type='str', aliases=['address', 'ip']), description = dict(type='str') - ), + ) + ) + + module = AnsibleModule( + argument_spec = argument_spec, supports_check_mode=True ) - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - state = module.params['state'] session_state = module.params['session_state'] monitor_state = module.params['monitor_state'] - partition = module.params['partition'] host = module.params['host'] name = module.params['name'] - address = "/%s/%s" % (partition, name) + address = fq_name(partition, name) description = module.params['description'] - if not validate_certs: - disable_ssl_cert_validation() - if state == 'absent' and host is not None: module.fail_json(msg="host parameter invalid when state=absent") @@ -410,5 +377,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py index 2eaaf8f3a34..4d8d599134e 100644 --- a/network/f5/bigip_pool.py +++ b/network/f5/bigip_pool.py @@ -228,27 +228,6 @@ EXAMPLES = ''' ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# =========================================== -# bigip_pool module specific support methods. -# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - -def disable_ssl_cert_validation(): - # You probably only want to do this for testing and never in production. 
- # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context - def pool_exists(api, pool): # hack to determine if pool exists result = False @@ -368,15 +347,9 @@ def main(): service_down_choices = ['none', 'reset', 'drop', 'reselect'] - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - validate_certs = dict(default='yes', type='bool'), - state = dict(type='str', default='present', choices=['present', 'absent']), + argument_spec=f5_argument_spec(); + argument_spec.update(dict( name = dict(type='str', required=True, aliases=['pool']), - partition = dict(type='str', default='Common'), lb_method = dict(type='str', choices=lb_method_choices), monitor_type = dict(type='str', choices=monitor_type_choices), quorum = dict(type='int'), @@ -385,21 +358,18 @@ def main(): service_down_action = dict(type='str', choices=service_down_choices), host = dict(type='str', aliases=['address']), port = dict(type='int') - ), + ) + ) + + module = AnsibleModule( + argument_spec = argument_spec, supports_check_mode=True ) - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - state = module.params['state'] name = module.params['name'] - partition = module.params['partition'] - pool = "/%s/%s" % (partition, name) + pool = fq_name(partition,name) lb_method = module.params['lb_method'] if lb_method: lb_method = lb_method.lower() @@ -411,16 +381,13 @@ def main(): if monitors: monitors = [] for monitor in module.params['monitors']: - if "/" not in monitor: - monitors.append("/%s/%s" % (partition, monitor)) - else: - monitors.append(monitor) + monitors.append(fq_name(partition, monitor)) slow_ramp_time = module.params['slow_ramp_time'] service_down_action = module.params['service_down_action'] if service_down_action: service_down_action = service_down_action.lower() host = module.params['host'] - address = "/%s/%s" % (partition, host) + address = fq_name(partition,host) port = module.params['port'] if not validate_certs: @@ -551,5 +518,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index bc4b7be2f7b..1d59462023f 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -196,27 +196,6 @@ EXAMPLES = ''' ''' -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# =========================================== -# bigip_pool_member module specific support methods. -# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - -def disable_ssl_cert_validation(): - # You probably only want to do this for testing and never in production. 
- # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context - def pool_exists(api, pool): # hack to determine if pool exists result = False @@ -327,49 +306,37 @@ def get_member_monitor_status(api, pool, address, port): return result def main(): - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - validate_certs = dict(default='yes', type='bool'), - state = dict(type='str', default='present', choices=['present', 'absent']), + argument_spec = f5_argument_spec(); + argument_spec.update(dict( session_state = dict(type='str', choices=['enabled', 'disabled']), monitor_state = dict(type='str', choices=['enabled', 'disabled']), pool = dict(type='str', required=True), - partition = dict(type='str', default='Common'), host = dict(type='str', required=True, aliases=['address', 'name']), port = dict(type='int', required=True), connection_limit = dict(type='int'), description = dict(type='str'), rate_limit = dict(type='int'), ratio = dict(type='int') - ), - supports_check_mode=True + ) ) - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") + module = AnsibleModule( + argument_spec = argument_spec, + supports_check_mode=True + ) - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - state = module.params['state'] + (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) session_state = module.params['session_state'] monitor_state = module.params['monitor_state'] - partition = module.params['partition'] - pool = "/%s/%s" % (partition, module.params['pool']) + pool = fq_name(partition, module.params['pool']) connection_limit = module.params['connection_limit'] description = module.params['description'] rate_limit = module.params['rate_limit'] ratio = module.params['ratio'] host = module.params['host'] - address = "/%s/%s" % (partition, host) + address = fq_name(partition, host) port = module.params['port'] - if not validate_certs: - disable_ssl_cert_validation() # sanity check user supplied values @@ -457,5 +424,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * main() From 1291f9a25ae0628ef3664d0d797ca0546f03848f Mon Sep 17 00:00:00 2001 From: Sebastian Kornehl Date: Wed, 3 Jun 2015 13:15:59 +0200 Subject: [PATCH 636/720] Added datadog_monitor module --- monitoring/datadog_monitor.py | 278 ++++++++++++++++++++++++++++++++++ 1 file changed, 278 insertions(+) create mode 100644 monitoring/datadog_monitor.py diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py new file mode 100644 index 00000000000..b5ad2d2d6d6 --- /dev/null +++ b/monitoring/datadog_monitor.py @@ -0,0 +1,278 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Sebastian Kornehl +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# import module snippets + +# Import Datadog +try: + from datadog import initialize, api + HAS_DATADOG = True +except: + HAS_DATADOG = False + +DOCUMENTATION = ''' +--- +module: datadog_monitor +short_description: Manages Datadog monitors +description: +- "Manages monitors within Datadog" +- "Options like described on http://docs.datadoghq.com/api/" +version_added: "2.0" +author: '"Sebastian Kornehl" ' +notes: [] +requirements: [datadog] +options: + api_key: + description: ["Your DataDog API key."] + required: true + default: null + app_key: + description: ["Your DataDog app key."] + required: true + default: null + state: + description: ["The designated state of the monitor."] + required: true + default: null + choices: ['present', 'absent', 'muted', 'unmuted'] + type: + description: ["The type of the monitor."] + required: false + default: null + choices: ['metric alert', 'service check'] + query: + description: ["he monitor query to notify on with syntax varying depending on what type of monitor you are creating."] + required: false + default: null + name: + description: ["The name of the alert."] + required: true + default: null + message: + description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events."] + required: false + default: null + silenced: + description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "] + required: false + default: "" + notify_no_data: + description: ["A boolean indicating whether this monitor will notify when data stops reporting.."] + required: false + default: False + no_data_timeframe: + description: ["The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks."] + required: false + default: 2x timeframe for metric, 2 minutes for service + timeout_h: + description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."] + required: false + default: null + renotify_interval: + description: ["The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's not resolved."] + required: false + default: null + escalation_message: + description: ["A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval is None"] + required: false + default: null + notify_audit: + description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."] + required: false + default: False + thresholds: + description: ["A dictionary of thresholds by status. Because service checks can have multiple thresholds, we don't define them directly in the query."] + required: false + default: {'ok': 1, 'critical': 1, 'warning': 1} +''' + +EXAMPLES = ''' +# Create a metric monitor +datadog_monitor: + type: "metric alert" + name: "Test monitor" + state: "present" + query: "datadog.agent.up".over("host:host1").last(2).count_by_status()" + message: "Some message." 
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Deletes a monitor +datadog_monitor: + name: "Test monitor" + state: "absent" + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Mutes a monitor +datadog_monitor: + name: "Test monitor" + state: "mute" + silenced: '{"*":None}' + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Unmutes a monitor +datadog_monitor: + name: "Test monitor" + state: "unmute" + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" +''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True), + app_key=dict(required=True), + state=dict(required=True, choises=['present', 'absent', 'mute', 'unmute']), + type=dict(required=False, choises=['metric alert', 'service check']), + name=dict(required=True), + query=dict(required=False), + message=dict(required=False, default=None), + silenced=dict(required=False, default=None, type='dict'), + notify_no_data=dict(required=False, default=False, choices=BOOLEANS), + no_data_timeframe=dict(required=False, default=None), + timeout_h=dict(required=False, default=None), + renotify_interval=dict(required=False, default=None), + escalation_message=dict(required=False, default=None), + notify_audit=dict(required=False, default=False, choices=BOOLEANS), + thresholds=dict(required=False, type='dict', default={'ok': 1, 'critical': 1, 'warning': 1}), + ) + ) + + # Prepare Datadog + if not HAS_DATADOG: + module.fail_json(msg='datadogpy required for this module') + + options = { + 'api_key': module.params['api_key'], + 'app_key': module.params['app_key'] + } + + initialize(**options) + + if module.params['state'] == 'present': + install_monitor(module) + elif module.params['state'] == 'absent': + delete_monitor(module) + elif module.params['state'] == 'mute': + mute_monitor(module) + elif module.params['state'] == 'unmute': + unmute_monitor(module) + + +def _get_monitor(module): + for monitor in api.Monitor.get_all(): + if monitor['name'] == module.params['name']: + return monitor + return {} + + +def _post_monitor(module, options): + try: + msg = api.Monitor.create(type=module.params['type'], query=module.params['query'], + name=module.params['name'], message=module.params['message'], + options=options) + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +def _update_monitor(module, monitor, options): + try: + msg = api.Monitor.update(id=monitor['id'], query=module.params['query'], + name=module.params['name'], message=module.params['message'], + options=options) + if len(set(msg) - set(monitor)) == 0: + module.exit_json(changed=False, msg=msg) + else: + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +def install_monitor(module): + options = { + "silenced": module.params['silenced'], + "notify_no_data": module.boolean(module.params['notify_no_data']), + "no_data_timeframe": module.params['no_data_timeframe'], + "timeout_h": module.params['timeout_h'], + "renotify_interval": module.params['renotify_interval'], + "escalation_message": module.params['escalation_message'], + "notify_audit": module.boolean(module.params['notify_audit']), + } + + if module.params['type'] == "service check": + options["thresholds"] = module.params['thresholds'] + + monitor = _get_monitor(module) + if not monitor: + 
_post_monitor(module, options) + else: + _update_monitor(module, monitor, options) + + +def delete_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.exit_json(changed=False) + try: + msg = api.Monitor.delete(monitor['id']) + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +def mute_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.fail_json(msg="Monitor %s not found!" % module.params['name']) + elif monitor['options']['silenced']: + module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.") + elif (module.params['silenced'] is not None + and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0): + module.exit_json(changed=False) + try: + if module.params['silenced'] is None or module.params['silenced'] == "": + msg = api.Monitor.mute(id=monitor['id']) + else: + msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced']) + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +def unmute_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.fail_json(msg="Monitor %s not found!" % module.params['name']) + elif not monitor['options']['silenced']: + module.exit_json(changed=False) + try: + msg = api.Monitor.unmute(monitor['id']) + module.exit_json(changed=True, msg=msg) + except Exception, e: + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() From 5bfe8f2a44e3a7637fee173f92c9775f5fe8a7be Mon Sep 17 00:00:00 2001 From: Roman Vyakhirev Date: Thu, 4 Jun 2015 01:25:08 +0300 Subject: [PATCH 637/720] bower module. 
Non-interactive mode and allow-root moved to _exec, they should affect all commands --- packaging/language/bower.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/language/bower.py b/packaging/language/bower.py index 34284356f6e..8fbe20f7e0c 100644 --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -86,7 +86,7 @@ class Bower(object): def _exec(self, args, run_in_check_mode=False, check_rc=True): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = ["bower"] + args + cmd = ["bower"] + args + ['--config.interactive=false', '--allow-root'] if self.name: cmd.append(self.name_version) @@ -108,7 +108,7 @@ class Bower(object): return '' def list(self): - cmd = ['list', '--json', '--config.interactive=false', '--allow-root'] + cmd = ['list', '--json'] installed = list() missing = list() From fdaa4da4476f60213c81ccd5e98d52d7ece6a415 Mon Sep 17 00:00:00 2001 From: Sebastian Kornehl Date: Thu, 4 Jun 2015 06:54:02 +0200 Subject: [PATCH 638/720] docs: removed default when required is true --- monitoring/datadog_monitor.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py index b5ad2d2d6d6..24de8af10ba 100644 --- a/monitoring/datadog_monitor.py +++ b/monitoring/datadog_monitor.py @@ -41,15 +41,12 @@ options: api_key: description: ["Your DataDog API key."] required: true - default: null app_key: description: ["Your DataDog app key."] required: true - default: null state: description: ["The designated state of the monitor."] required: true - default: null choices: ['present', 'absent', 'muted', 'unmuted'] type: description: ["The type of the monitor."] @@ -63,7 +60,6 @@ options: name: description: ["The name of the alert."] required: true - default: null message: description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events."] required: false From e972346faea861c1a1067c142d5bf8c4efe331ce Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser Date: Thu, 4 Jun 2015 22:17:16 +0100 Subject: [PATCH 639/720] Webfaction will create a default database user when db is created. For symmetry and repeatability, delete it when db is deleted. Add missing param to documentation. --- cloud/webfaction/webfaction_db.py | 48 ++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index a9ef88b943e..1a91d649458 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -4,7 +4,7 @@ # # ------------------------------------------ # -# (c) Quentin Stafford-Fraser 2015 +# (c) Quentin Stafford-Fraser and Andy Baker 2015 # # This file is part of Ansible # @@ -53,6 +53,12 @@ options: required: true choices: ['mysql', 'postgresql'] + password: + description: + - The password for the new database user. + required: false + default: None + login_name: description: - The webfaction account to use @@ -75,6 +81,10 @@ EXAMPLES = ''' type: mysql login_name: "{{webfaction_user}}" login_password: "{{webfaction_passwd}}" + + # Note that, for symmetry's sake, deleting a database using + # 'state: absent' will also delete the matching user. 
+ ''' import socket @@ -110,13 +120,17 @@ def main(): db_map = dict([(i['name'], i) for i in db_list]) existing_db = db_map.get(db_name) + user_list = webfaction.list_db_users(session_id) + user_map = dict([(i['username'], i) for i in user_list]) + existing_user = user_map.get(db_name) + result = {} # Here's where the real stuff happens if db_state == 'present': - # Does an app with this name already exist? + # Does an database with this name already exist? if existing_db: # Yes, but of a different type - fail if existing_db['db_type'] != db_type: @@ -129,8 +143,8 @@ def main(): if not module.check_mode: - # If this isn't a dry run, create the app - # print positional_args + # If this isn't a dry run, create the db + # and default user. result.update( webfaction.create_db( session_id, db_name, db_type, db_passwd @@ -139,17 +153,23 @@ def main(): elif db_state == 'absent': - # If the app's already not there, nothing changed. - if not existing_db: - module.exit_json( - changed = False, - ) - + # If this isn't a dry run... if not module.check_mode: - # If this isn't a dry run, delete the app - result.update( - webfaction.delete_db(session_id, db_name, db_type) - ) + + if not (existing_db or existing_user): + module.exit_json(changed = False,) + + if existing_db: + # Delete the db if it exists + result.update( + webfaction.delete_db(session_id, db_name, db_type) + ) + + if existing_user: + # Delete the default db user if it exists + result.update( + webfaction.delete_db_user(session_id, db_name, db_type) + ) else: module.fail_json(msg="Unknown state specified: {}".format(db_state)) From cc9d2ad03ff5c7dd5d0202bf1b7e69a56c2943cb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 11:25:27 -0400 Subject: [PATCH 640/720] minor docs update --- cloud/webfaction/webfaction_app.py | 2 +- cloud/webfaction/webfaction_db.py | 2 +- cloud/webfaction/webfaction_domain.py | 2 +- cloud/webfaction/webfaction_mailbox.py | 2 +- cloud/webfaction/webfaction_site.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index 55599bdcca6..3e42ec1265e 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -31,7 +31,7 @@ module: webfaction_app short_description: Add or remove applications on a Webfaction host description: - Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser +author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index 1a91d649458..f420490711c 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -28,7 +28,7 @@ module: webfaction_db short_description: Add or remove a database on Webfaction description: - Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. 
-author: Quentin Stafford-Fraser +author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index f2c95897bc5..0b35faf110f 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -28,7 +28,7 @@ module: webfaction_domain short_description: Add or remove domains and subdomains on Webfaction description: - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser +author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted. diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index 976a428f3d3..7547b6154e5 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -27,7 +27,7 @@ module: webfaction_mailbox short_description: Add or remove mailboxes on Webfaction description: - Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser +author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py index 223458faf46..57eae39c0dc 100644 --- a/cloud/webfaction/webfaction_site.py +++ b/cloud/webfaction/webfaction_site.py @@ -28,7 +28,7 @@ module: webfaction_site short_description: Add or remove a website on a Webfaction host description: - Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser +author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name. 
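
The F5 refactor earlier in this series ([PATCH 635/720]) moves connection handling into ansible.module_utils.f5, but the shared helpers themselves never appear in these diffs. Judging only from how the refactored modules call them, they presumably behave roughly like the sketch below: fq_name() reproduces the old "/partition/name" formatting while leaving already-qualified names untouched, and f5_parse_arguments() hands back the common connection parameters in the order the modules unpack them. This is an illustrative sketch of assumed behaviour, not the actual module_utils code.

    # Sketch only: the real implementations live in ansible.module_utils.f5 and may differ.
    def fq_name(partition, name):
        """Qualify a name with its partition, e.g. fq_name('Common', 'web_pool') -> '/Common/web_pool'."""
        if name is not None and '/' not in name:
            return '/%s/%s' % (partition, name)
        return name

    def f5_parse_arguments(module):
        """Return the common F5 parameters in the order the refactored modules unpack them."""
        p = module.params
        return (p['server'], p['user'], p['password'],
                p['state'], p['partition'], p['validate_certs'])
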
From 653ce424e094ee65223bd1efd0dd7f9d6d49fdb7 Mon Sep 17 00:00:00 2001 From: "jonathan.lestrelin" Date: Fri, 5 Jun 2015 18:18:48 +0200 Subject: [PATCH 641/720] Add pear packaging module to manage PHP PEAR an PECL packages --- packaging/language/pear.py | 230 +++++++++++++++++++++++++++++++++++++ 1 file changed, 230 insertions(+) create mode 100644 packaging/language/pear.py diff --git a/packaging/language/pear.py b/packaging/language/pear.py new file mode 100644 index 00000000000..c9e3862a31f --- /dev/null +++ b/packaging/language/pear.py @@ -0,0 +1,230 @@ +#!/usr/bin/python -tt +# -*- coding: utf-8 -*- + +# (c) 2012, Afterburn +# (c) 2013, Aaron Bull Schaefer +# (c) 2015, Jonathan Lestrelin +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: pear +short_description: Manage pear/pecl packages +description: + - Manage PHP packages with the pear package manager. +author: + - "'jonathan.lestrelin' " +notes: [] +requirements: [] +options: + name: + description: + - Name of the package to install, upgrade, or remove. + required: true + default: null + + state: + description: + - Desired state of the package. + required: false + default: "present" + choices: ["present", "absent", "latest"] +''' + +EXAMPLES = ''' +# Install pear package +- pear: name=Net_URL2 state=present + +# Install pecl package +- pear: name=pecl/json_post state=present + +# Upgrade package +- pear: name=Net_URL2 state=latest + +# Remove packages +- pear: name=Net_URL2,pecl/json_post state=absent +''' + +import os + +def get_local_version(pear_output): + """Take pear remoteinfo output and get the installed version""" + lines = pear_output.split('\n') + for line in lines: + if 'Installed ' in line: + installed = line.rsplit(None, 1)[-1].strip() + if installed == '-': continue + return installed + return None + +def get_repository_version(pear_output): + """Take pear remote-info output and get the latest version""" + lines = pear_output.split('\n') + for line in lines: + if 'Latest ' in line: + return line.rsplit(None, 1)[-1].strip() + return None + +def query_package(module, name, state="present"): + """Query the package status in both the local system and the repository. 
+ Returns a boolean to indicate if the package is installed, + and a second boolean to indicate if the package is up-to-date.""" + if state == "present": + lcmd = "pear info %s" % (name) + lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) + if lrc != 0: + # package is not installed locally + return False, False + + rcmd = "pear remote-info %s" % (name) + rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) + + # get the version installed locally (if any) + lversion = get_local_version(rstdout) + + # get the version in the repository + rversion = get_repository_version(rstdout) + + if rrc == 0: + # Return True to indicate that the package is installed locally, + # and the result of the version number comparison + # to determine if the package is up-to-date. + return True, (lversion == rversion) + + return False, False + + +def remove_packages(module, packages): + remove_c = 0 + # Using a for loop incase of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + installed, updated = query_package(module, package) + if not installed: + continue + + cmd = "pear uninstall %s" % (package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to remove %s" % (package)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, state, packages, package_files): + install_c = 0 + + for i, package in enumerate(packages): + # if the package is installed and state == present + # or state == latest and is up-to-date then skip + installed, updated = query_package(module, package) + if installed and (state == 'present' or (state == 'latest' and updated)): + continue + + if state == 'present': + command = 'install' + + if state == 'latest': + command = 'upgrade' + + cmd = "pear %s %s" % (command, package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to install %s" % (package)) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) + + module.exit_json(changed=False, msg="package(s) already installed") + + +def check_packages(module, packages, state): + would_be_changed = [] + for package in packages: + installed, updated = query_package(module, package) + if ((state in ["present", "latest"] and not installed) or + (state == "absent" and installed) or + (state == "latest" and not updated)): + would_be_changed.append(package) + if would_be_changed: + if state == "absent": + state = "removed" + module.exit_json(changed=True, msg="%s package(s) would be %s" % ( + len(would_be_changed), state)) + else: + module.exit_json(change=False, msg="package(s) already %s" % state) + +import os + +def exe_exists(program): + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + exe_file = os.path.join(path, program) + if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK): + return True + + return False + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(aliases=['pkg']), + state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed'])), + required_one_of = [['name']], + supports_check_mode = True) + + if not exe_exists("pear"): + module.fail_json(msg="cannot find pear executable in PATH") + 
+ p = module.params + + # normalize the state parameter + if p['state'] in ['present', 'installed']: + p['state'] = 'present' + elif p['state'] in ['absent', 'removed']: + p['state'] = 'absent' + + if p['name']: + pkgs = p['name'].split(',') + + pkg_files = [] + for i, pkg in enumerate(pkgs): + pkg_files.append(None) + + if module.check_mode: + check_packages(module, pkgs, p['state']) + + if p['state'] in ['present', 'latest']: + install_packages(module, p['state'], pkgs, pkg_files) + elif p['state'] == 'absent': + remove_packages(module, pkgs) + +# import module snippets +from ansible.module_utils.basic import * + +main() From 9d4046f44bd05589594c6d4b3ef178abd2b4758b Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 6 Jun 2015 09:13:11 +0200 Subject: [PATCH 642/720] puppet: ensure puppet is in live mode per default puppet may be configured to operate in `--noop` mode per default. That is why we must pass a `--no-noop` to make sure, changes are going to be applied. --- system/puppet.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/system/puppet.py b/system/puppet.py index 46a5ea58d4f..3d4223bd1e5 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -156,10 +156,14 @@ def main(): cmd += " --show-diff" if module.check_mode: cmd += " --noop" + else: + cmd += " --no-noop" else: cmd = "%s apply --detailed-exitcodes " % base_cmd if module.check_mode: cmd += "--noop " + else: + cmd += "--no-noop " cmd += pipes.quote(p['manifest']) rc, stdout, stderr = module.run_command(cmd) From 616a56f871901635d6ea27525f7d4b005048e8b2 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 6 Jun 2015 09:42:56 +0200 Subject: [PATCH 643/720] puppet: add --environment support --- system/puppet.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/system/puppet.py b/system/puppet.py index 3d4223bd1e5..c9e7943ff25 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -59,6 +59,11 @@ options: - Basename of the facter output file required: false default: ansible + environment: + desciption: + - Puppet environment to be used. 
+ required: false + default: None requirements: [ puppet ] author: Monty Taylor ''' @@ -69,6 +74,9 @@ EXAMPLES = ''' # Run puppet and timeout in 5 minutes - puppet: timeout=5m + +# Run puppet using a different environment +- puppet: environment=testing ''' @@ -104,6 +112,7 @@ def main(): default=False, aliases=['show-diff'], type='bool'), facts=dict(default=None), facter_basename=dict(default='ansible'), + environment=dict(required=False, default=None), ), supports_check_mode=True, required_one_of=[ @@ -154,12 +163,16 @@ def main(): puppetmaster=pipes.quote(p['puppetmaster'])) if p['show_diff']: cmd += " --show-diff" + if p['environment']: + cmd += " --environment '%s'" % p['environment'] if module.check_mode: cmd += " --noop" else: cmd += " --no-noop" else: cmd = "%s apply --detailed-exitcodes " % base_cmd + if p['environment']: + cmd += "--environment '%s' " % p['environment'] if module.check_mode: cmd += "--noop " else: From c277946fb306c826be54c25e283c683557b7c2c5 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 6 Jun 2015 09:46:16 +0200 Subject: [PATCH 644/720] puppet: fix missing space between command and arg Fixes: ~~~ { "cmd": "/usr/bin/puppetconfig print agent_disabled_lockfile", "failed": true, "msg": "[Errno 2] No such file or directory", "rc": 2 } ~~~ --- system/puppet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/puppet.py b/system/puppet.py index c9e7943ff25..9e2994225c4 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -137,7 +137,7 @@ def main(): # Check if puppet is disabled here if p['puppetmaster']: rc, stdout, stderr = module.run_command( - PUPPET_CMD + "config print agent_disabled_lockfile") + PUPPET_CMD + " config print agent_disabled_lockfile") if os.path.exists(stdout.strip()): module.fail_json( msg="Puppet agent is administratively disabled.", disabled=True) From e633d9946fc9048e5ec258552fa018d8da27d18d Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 6 Jun 2015 10:08:16 +0200 Subject: [PATCH 645/720] puppet: make arg puppetmaster optional puppetmaster was used to determine if `agent` or `apply` should be used. But puppetmaster is not required by puppet per default. Puppet may have a config or could find out by itself (...) where the puppet master is. It changed the code so we only use `apply` if a manifest was passed, otherwise we use `agent`. This also fixes the example, which did not work the way without this change. ~~~ # Run puppet agent and fail if anything goes wrong - puppet ~~~ --- system/puppet.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/system/puppet.py b/system/puppet.py index 9e2994225c4..83bbcbe6e18 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -35,12 +35,12 @@ options: default: 30m puppetmaster: description: - - The hostname of the puppetmaster to contact. Must have this or manifest + - The hostname of the puppetmaster to contact. required: false default: None manifest: desciption: - - Path to the manifest file to run puppet apply on. Must have this or puppetmaster + - Path to the manifest file to run puppet apply on. 
required: false default: None show_diff: @@ -69,7 +69,7 @@ author: Monty Taylor ''' EXAMPLES = ''' -# Run puppet and fail if anything goes wrong +# Run puppet agent and fail if anything goes wrong - puppet # Run puppet and timeout in 5 minutes @@ -115,7 +115,7 @@ def main(): environment=dict(required=False, default=None), ), supports_check_mode=True, - required_one_of=[ + mutually_exclusive=[ ('puppetmaster', 'manifest'), ], ) @@ -135,7 +135,7 @@ def main(): manifest=p['manifest'])) # Check if puppet is disabled here - if p['puppetmaster']: + if not p['manifest']: rc, stdout, stderr = module.run_command( PUPPET_CMD + " config print agent_disabled_lockfile") if os.path.exists(stdout.strip()): @@ -154,13 +154,14 @@ def main(): base_cmd = "timeout -s 9 %(timeout)s %(puppet_cmd)s" % dict( timeout=pipes.quote(p['timeout']), puppet_cmd=PUPPET_CMD) - if p['puppetmaster']: + if not p['manifest']: cmd = ("%(base_cmd)s agent --onetime" - " --server %(puppetmaster)s" " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay" " --detailed-exitcodes --verbose") % dict( base_cmd=base_cmd, - puppetmaster=pipes.quote(p['puppetmaster'])) + ) + if p['puppetmaster']: + cmd += " -- server %s" % pipes.quote(p['puppetmaster']) if p['show_diff']: cmd += " --show-diff" if p['environment']: From b5d22eb1ec6c7cd2cbef14554fc92c86c2e24452 Mon Sep 17 00:00:00 2001 From: Pepe Barbe Date: Sun, 7 Jun 2015 13:18:33 -0500 Subject: [PATCH 646/720] Refactor win_chocolatey module * Refactor code to be more robust. Run main logic inside a try {} catch {} block. If there is any error, bail out and log all the command output automatically. * Rely on error code generated by chocolatey instead of scraping text output to determine success/failure. * Add support for unattended installs: (`-y` flag is a requirement by chocolatey) * Before (un)installing, check existence of files. * Use functions to abstract logic * The great rewrite of 0.9.9, the `choco` interface has changed, check if chocolatey is installed and an older version. If so upgrade to latest. * Allow upgrading packages that are already installed * Use verbose logging for chocolate actions * Adding functionality to specify a source for a chocolatey repository. (@smadam813) * Removing pre-determined sources and adding specified source url in it's place. (@smadam813) Contains contributions from: * Adam Keech (@smadam813) --- windows/win_chocolatey.ps1 | 339 ++++++++++++++++++++++--------------- windows/win_chocolatey.py | 43 ++--- 2 files changed, 218 insertions(+), 164 deletions(-) diff --git a/windows/win_chocolatey.ps1 b/windows/win_chocolatey.ps1 index de42434da76..4a033d23157 100644 --- a/windows/win_chocolatey.ps1 +++ b/windows/win_chocolatey.ps1 @@ -16,25 +16,11 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+$ErrorActionPreference = "Stop" + # WANT_JSON # POWERSHELL_COMMON -function Write-Log -{ - param - ( - [parameter(mandatory=$false)] - [System.String] - $message - ) - - $date = get-date -format 'yyyy-MM-dd hh:mm:ss.zz' - - Write-Host "$date | $message" - - Out-File -InputObject "$date $message" -FilePath $global:LoggingFile -Append -} - $params = Parse-Args $args; $result = New-Object PSObject; Set-Attr $result "changed" $false; @@ -48,21 +34,22 @@ Else Fail-Json $result "missing required argument: name" } -if(($params.logPath).length -gt 0) +If ($params.force) { - $global:LoggingFile = $params.logPath + $force = $params.force | ConvertTo-Bool } -else +Else { - $global:LoggingFile = "c:\ansible-playbook.log" + $force = $false } -If ($params.force) + +If ($params.upgrade) { - $force = $params.force | ConvertTo-Bool + $upgrade = $params.upgrade | ConvertTo-Bool } Else { - $force = $false + $upgrade = $false } If ($params.version) @@ -74,6 +61,15 @@ Else $version = $null } +If ($params.source) +{ + $source = $params.source.ToString().ToLower() +} +Else +{ + $source = $null +} + If ($params.showlog) { $showlog = $params.showlog | ConvertTo-Bool @@ -96,157 +92,230 @@ Else $state = "present" } -$ChocoAlreadyInstalled = get-command choco -ErrorAction 0 -if ($ChocoAlreadyInstalled -eq $null) +Function Chocolatey-Install-Upgrade { - #We need to install chocolatey - $install_choco_result = iex ((new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1")) - $result.changed = $true - $executable = "C:\ProgramData\chocolatey\bin\choco.exe" -} -Else -{ - $executable = "choco.exe" -} + [CmdletBinding()] -If ($params.source) -{ - $source = $params.source.ToString().ToLower() - If (($source -ne "chocolatey") -and ($source -ne "webpi") -and ($source -ne "windowsfeatures") -and ($source -ne "ruby") -and (!$source.startsWith("http://", "CurrentCultureIgnoreCase")) -and (!$source.startsWith("https://", "CurrentCultureIgnoreCase"))) + param() + + $ChocoAlreadyInstalled = get-command choco -ErrorAction 0 + if ($ChocoAlreadyInstalled -eq $null) + { + #We need to install chocolatey + iex ((new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1")) + $result.changed = $true + $script:executable = "C:\ProgramData\chocolatey\bin\choco.exe" + } + else { - Fail-Json $result "source is $source - must be one of chocolatey, ruby, webpi, windowsfeatures or a custom source url." 
+ $script:executable = "choco.exe" + + if ((choco --version) -lt '0.9.9') + { + Choco-Upgrade chocolatey + } } } -Elseif (!$params.source) + + +Function Choco-IsInstalled { - $source = "chocolatey" + [CmdletBinding()] + + param( + [Parameter(Mandatory=$true, Position=1)] + [string]$package + ) + + $cmd = "$executable list --local-only $package" + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "choco_error_cmd" $cmd + Set-Attr $result "choco_error_log" "$results" + + Throw "Error checking installation status for $package" + } + + If ("$results" -match " $package .* (\d+) packages installed.") + { + return $matches[1] -gt 0 + } + + $false } -if ($source -eq "webpi") +Function Choco-Upgrade { - # check whether 'webpi' installation source is available; if it isn't, install it - $webpi_check_cmd = "$executable list webpicmd -localonly" - $webpi_check_result = invoke-expression $webpi_check_cmd - Set-Attr $result "chocolatey_bootstrap_webpi_check_cmd" $webpi_check_cmd - Set-Attr $result "chocolatey_bootstrap_webpi_check_log" $webpi_check_result - if ( - ( - ($webpi_check_result.GetType().Name -eq "String") -and - ($webpi_check_result -match "No packages found") - ) -or - ($webpi_check_result -contains "No packages found.") + [CmdletBinding()] + + param( + [Parameter(Mandatory=$true, Position=1)] + [string]$package, + [Parameter(Mandatory=$false, Position=2)] + [string]$version, + [Parameter(Mandatory=$false, Position=3)] + [string]$source, + [Parameter(Mandatory=$false, Position=4)] + [bool]$force ) + + if (-not (Choco-IsInstalled $package)) { - #lessmsi is a webpicmd dependency, but dependency resolution fails unless it's installed separately - $lessmsi_install_cmd = "$executable install lessmsi" - $lessmsi_install_result = invoke-expression $lessmsi_install_cmd - Set-Attr $result "chocolatey_bootstrap_lessmsi_install_cmd" $lessmsi_install_cmd - Set-Attr $result "chocolatey_bootstrap_lessmsi_install_log" $lessmsi_install_result + throw "$package is not installed, you cannot upgrade" + } - $webpi_install_cmd = "$executable install webpicmd" - $webpi_install_result = invoke-expression $webpi_install_cmd - Set-Attr $result "chocolatey_bootstrap_webpi_install_cmd" $webpi_install_cmd - Set-Attr $result "chocolatey_bootstrap_webpi_install_log" $webpi_install_result + $cmd = "$executable upgrade -dv -y $package" - if (($webpi_install_result | select-string "already installed").length -gt 0) - { - #no change - } - elseif (($webpi_install_result | select-string "webpicmd has finished successfully").length -gt 0) + if ($version) + { + $cmd += " -version $version" + } + + if ($source) + { + $cmd += " -source $source" + } + + if ($force) + { + $cmd += " -force" + } + + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "choco_error_cmd" $cmd + Set-Attr $result "choco_error_log" "$results" + Throw "Error installing $package" + } + + if ("$results" -match ' upgraded (\d+)/\d+ package\(s\)\. 
') + { + if ($matches[1] -gt 0) { $result.changed = $true } - Else - { - Fail-Json $result "WebPI install error: $webpi_install_result" - } } } -$expression = $executable -if ($state -eq "present") -{ - $expression += " install $package" -} -Elseif ($state -eq "absent") -{ - $expression += " uninstall $package" -} -if ($force) + +Function Choco-Install { - if ($state -eq "present") + [CmdletBinding()] + + param( + [Parameter(Mandatory=$true, Position=1)] + [string]$package, + [Parameter(Mandatory=$false, Position=2)] + [string]$version, + [Parameter(Mandatory=$false, Position=3)] + [string]$source, + [Parameter(Mandatory=$false, Position=4)] + [bool]$force, + [Parameter(Mandatory=$false, Position=5)] + [bool]$upgrade + ) + + if (Choco-IsInstalled $package) { - $expression += " -force" + if ($upgrade) + { + Choco-Upgrade -package $package -version $version -source $source -force $force + } + + return } -} -if ($version) -{ - $expression += " -version $version" -} -if ($source -eq "chocolatey") -{ - $expression += " -source https://chocolatey.org/api/v2/" -} -elseif (($source -eq "windowsfeatures") -or ($source -eq "webpi") -or ($source -eq "ruby")) -{ - $expression += " -source $source" -} -elseif(($source -ne $Null) -and ($source -ne "")) -{ - $expression += " -source $source" -} -Set-Attr $result "chocolatey command" $expression -$op_result = invoke-expression $expression -if ($state -eq "present") -{ - if ( - (($op_result | select-string "already installed").length -gt 0) -or - # webpi has different text output, and that doesn't include the package name but instead the human-friendly name - (($op_result | select-string "No products to be installed").length -gt 0) - ) + $cmd = "$executable install -dv -y $package" + + if ($version) { - #no change + $cmd += " -version $version" } - elseif ( - (($op_result | select-string "has finished successfully").length -gt 0) -or - # webpi has different text output, and that doesn't include the package name but instead the human-friendly name - (($op_result | select-string "Install of Products: SUCCESS").length -gt 0) -or - (($op_result | select-string "gem installed").length -gt 0) -or - (($op_result | select-string "gems installed").length -gt 0) - ) + + if ($source) { - $result.changed = $true + $cmd += " -source $source" + } + + if ($force) + { + $cmd += " -force" } - Else + + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) { - Fail-Json $result "Install error: $op_result" + Set-Attr $result "choco_error_cmd" $cmd + Set-Attr $result "choco_error_log" "$results" + Throw "Error installing $package" } + + $result.changed = $true } -Elseif ($state -eq "absent") + +Function Choco-Uninstall { - $op_result = invoke-expression "$executable uninstall $package" - # HACK: Misleading - 'Uninstalling from folder' appears in output even when package is not installed, hence order of checks this way - if ( - (($op_result | select-string "not installed").length -gt 0) -or - (($op_result | select-string "Cannot find path").length -gt 0) + [CmdletBinding()] + + param( + [Parameter(Mandatory=$true, Position=1)] + [string]$package, + [Parameter(Mandatory=$false, Position=2)] + [string]$version, + [Parameter(Mandatory=$false, Position=3)] + [bool]$force ) + + if (-not (Choco-IsInstalled $package)) { - #no change + return } - elseif (($op_result | select-string "Uninstalling from folder").length -gt 0) + + $cmd = "$executable uninstall -dv -y $package" + + if ($version) { - $result.changed = $true + $cmd += " -version $version" } - else + + if ($force) { - 
Fail-Json $result "Uninstall error: $op_result" + $cmd += " -force" } + + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "choco_error_cmd" $cmd + Set-Attr $result "choco_error_log" "$results" + Throw "Error uninstalling $package" + } + + $result.changed = $true } +Try +{ + Chocolatey-Install-Upgrade + + if ($state -eq "present") + { + Choco-Install -package $package -version $version -source $source ` + -force $force -upgrade $upgrade + } + else + { + Choco-Uninstall -package $package -version $version -force $force + } -if ($showlog) + Exit-Json $result; +} +Catch { - Set-Attr $result "chocolatey_log" $op_result + Fail-Json $result $_.Exception.Message } -Set-Attr $result "chocolatey_success" "true" -Exit-Json $result; diff --git a/windows/win_chocolatey.py b/windows/win_chocolatey.py index 63ec1ecd214..fe00f2e0f6a 100644 --- a/windows/win_chocolatey.py +++ b/windows/win_chocolatey.py @@ -53,42 +53,29 @@ options: - no default: no aliases: [] - version: + upgrade: description: - - Specific version of the package to be installed - - Ignored when state == 'absent' - required: false - default: null - aliases: [] - showlog: - description: - - Outputs the chocolatey log inside a chocolatey_log property. + - If package is already installed it, try to upgrade to the latest version or to the specified version required: false choices: - yes - no default: no aliases: [] - source: + version: description: - - Which source to install from - require: false - choices: - - chocolatey - - ruby - - webpi - - windowsfeatures - default: chocolatey + - Specific version of the package to be installed + - Ignored when state == 'absent' + required: false + default: null aliases: [] - logPath: + source: description: - - Where to log command output to + - Specify source rather than using default chocolatey repository require: false - default: c:\\ansible-playbook.log + default: null aliases: [] -author: - - '"Trond Hindenes (@trondhindenes)" ' - - '"Peter Mounce (@petemounce)" ' +author: Trond Hindenes, Peter Mounce, Pepe Barbe, Adam Keech ''' # TODO: @@ -111,10 +98,8 @@ EXAMPLES = ''' name: git state: absent - # Install Application Request Routing v3 from webpi - # Logically, this requires that you install IIS first (see win_feature) - # To find a list of packages available via webpi source, `choco list -source webpi` + # Install git from specified repository win_chocolatey: - name: ARRv3 - source: webpi + name: git + source: https://someserver/api/v2/ ''' From 16851baaf746ad7f5f29c50623c159329fdc219b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 7 Jun 2015 17:45:33 -0400 Subject: [PATCH 647/720] added missing options: --- cloud/cloudstack/cs_project.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py index b604a1b6f32..e604abc13db 100644 --- a/cloud/cloudstack/cs_project.py +++ b/cloud/cloudstack/cs_project.py @@ -26,6 +26,7 @@ description: - Create, update, suspend, activate and remove projects. version_added: '2.0' author: '"René Moser (@resmo)" ' +options: name: description: - Name of the project. 
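
A recurring theme in the win_chocolatey rewrite above is to stop scraping tool output for success phrases and to trust the exit code instead, attaching the captured output only when something fails. The Python modules in this series follow the same pattern through module.run_command(); the sketch below is illustrative only and is not code taken from any module above.

    # Sketch of the exit-code-first pattern: fail on rc != 0 and attach the output,
    # rather than pattern-matching on the text the tool prints.
    def run_or_fail(module, cmd):
        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
        if rc != 0:
            module.fail_json(msg="command failed: %s" % cmd,
                             rc=rc, stdout=stdout, stderr=stderr)
        return stdout
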
From 2e6a16fbc7f4e8b919adf124a894ce1d8136737c Mon Sep 17 00:00:00 2001 From: "jonathan.lestrelin" Date: Mon, 8 Jun 2015 09:28:01 +0200 Subject: [PATCH 648/720] Fix unused import and variable and correct documentation --- packaging/language/pear.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/packaging/language/pear.py b/packaging/language/pear.py index c9e3862a31f..5762f9c815c 100644 --- a/packaging/language/pear.py +++ b/packaging/language/pear.py @@ -26,16 +26,14 @@ module: pear short_description: Manage pear/pecl packages description: - Manage PHP packages with the pear package manager. +version_added: 2.0 author: - "'jonathan.lestrelin' " -notes: [] -requirements: [] options: name: description: - Name of the package to install, upgrade, or remove. required: true - default: null state: description: @@ -132,7 +130,7 @@ def remove_packages(module, packages): module.exit_json(changed=False, msg="package(s) already absent") -def install_packages(module, state, packages, package_files): +def install_packages(module, state, packages): install_c = 0 for i, package in enumerate(packages): @@ -178,7 +176,6 @@ def check_packages(module, packages, state): else: module.exit_json(change=False, msg="package(s) already %s" % state) -import os def exe_exists(program): for path in os.environ["PATH"].split(os.pathsep): @@ -220,7 +217,7 @@ def main(): check_packages(module, pkgs, p['state']) if p['state'] in ['present', 'latest']: - install_packages(module, p['state'], pkgs, pkg_files) + install_packages(module, p['state'], pkgs) elif p['state'] == 'absent': remove_packages(module, pkgs) From d722d6de976328d3fdbde64ca7ecef5cf5516037 Mon Sep 17 00:00:00 2001 From: Jhonny Everson Date: Mon, 8 Jun 2015 17:46:53 -0300 Subject: [PATCH 649/720] Adds handler for error responses --- monitoring/datadog_monitor.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py index 24de8af10ba..97968ed648d 100644 --- a/monitoring/datadog_monitor.py +++ b/monitoring/datadog_monitor.py @@ -187,7 +187,10 @@ def _post_monitor(module, options): msg = api.Monitor.create(type=module.params['type'], query=module.params['query'], name=module.params['name'], message=module.params['message'], options=options) - module.exit_json(changed=True, msg=msg) + if 'errors' in msg: + module.fail_json(msg=str(msg['errors'])) + else: + module.exit_json(changed=True, msg=msg) except Exception, e: module.fail_json(msg=str(e)) @@ -197,7 +200,9 @@ def _update_monitor(module, monitor, options): msg = api.Monitor.update(id=monitor['id'], query=module.params['query'], name=module.params['name'], message=module.params['message'], options=options) - if len(set(msg) - set(monitor)) == 0: + if 'errors' in msg: + module.fail_json(msg=str(msg['errors'])) + elif len(set(msg) - set(monitor)) == 0: module.exit_json(changed=False, msg=msg) else: module.exit_json(changed=True, msg=msg) @@ -243,7 +248,7 @@ def mute_monitor(module): module.fail_json(msg="Monitor %s not found!" % module.params['name']) elif monitor['options']['silenced']: module.fail_json(msg="Monitor is already muted. 
Datadog does not allow to modify muted alerts, consider unmuting it first.") - elif (module.params['silenced'] is not None + elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0): module.exit_json(changed=False) try: From 1d49d4af092eb12329149f087e7bca2574509599 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 9 Jun 2015 13:06:24 +0200 Subject: [PATCH 650/720] cloudstack: fix project name must not be case sensitiv --- cloud/cloudstack/cs_project.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py index e604abc13db..13209853527 100644 --- a/cloud/cloudstack/cs_project.py +++ b/cloud/cloudstack/cs_project.py @@ -167,7 +167,7 @@ class AnsibleCloudStackProject(AnsibleCloudStack): projects = self.cs.listProjects(**args) if projects: for p in projects['project']: - if project in [ p['name'], p['id']]: + if project.lower() in [ p['name'].lower(), p['id']]: self.project = p break return self.project From ed0395e2cccab1506ff7f103122fc02e46ca6fb9 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 9 Jun 2015 13:08:38 +0200 Subject: [PATCH 651/720] cloudstack: remove listall in cs_project listall in cs_project can return the wrong project for root admins, because project name are not unique in separate accounts. --- cloud/cloudstack/cs_project.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py index 13209853527..b505433892e 100644 --- a/cloud/cloudstack/cs_project.py +++ b/cloud/cloudstack/cs_project.py @@ -160,7 +160,6 @@ class AnsibleCloudStackProject(AnsibleCloudStack): project = self.module.params.get('name') args = {} - args['listall'] = True args['account'] = self.get_account(key='name') args['domainid'] = self.get_domain(key='id') From 4b625bab34a3bb57dd4d966473c42458c89f01f3 Mon Sep 17 00:00:00 2001 From: Jhonny Everson Date: Tue, 9 Jun 2015 09:44:34 -0300 Subject: [PATCH 652/720] Fixes the bug where it was using only the keys to determine whether a change was made, i.e. values changes for existing keys was reported incorrectly. 
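A minimal sketch of the idea behind the change described above (the patch itself follows): compare the two monitor dicts on both keys and values, skipping keys the API fills in on its own such as 'creator' and 'overall_state'.

def equal_ignoring(a, b, ignore_keys):
    # Two dicts are considered equal if they share the same keys (outside the
    # ignored set) and every shared key maps to the same value.
    ka = set(a).difference(ignore_keys)
    kb = set(b).difference(ignore_keys)
    return ka == kb and all(a[k] == b[k] for k in ka)

# The previous check, len(set(msg) - set(monitor)) == 0, compared key names only,
# so a monitor whose values had changed was still reported as unchanged.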
--- monitoring/datadog_monitor.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py index 97968ed648d..cb54cd32b5d 100644 --- a/monitoring/datadog_monitor.py +++ b/monitoring/datadog_monitor.py @@ -194,6 +194,10 @@ def _post_monitor(module, options): except Exception, e: module.fail_json(msg=str(e)) +def _equal_dicts(a, b, ignore_keys): + ka = set(a).difference(ignore_keys) + kb = set(b).difference(ignore_keys) + return ka == kb and all(a[k] == b[k] for k in ka) def _update_monitor(module, monitor, options): try: @@ -202,7 +206,7 @@ def _update_monitor(module, monitor, options): options=options) if 'errors' in msg: module.fail_json(msg=str(msg['errors'])) - elif len(set(msg) - set(monitor)) == 0: + elif _equal_dicts(msg, monitor, ['creator', 'overall_state']): module.exit_json(changed=False, msg=msg) else: module.exit_json(changed=True, msg=msg) From 3bd19b8ea0cee5b5274eae0dc15492253fec2346 Mon Sep 17 00:00:00 2001 From: David Siefert Date: Tue, 9 Jun 2015 10:21:33 -0500 Subject: [PATCH 653/720] Adding support for setting the topic of a channel --- notification/irc.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/notification/irc.py b/notification/irc.py index 8b87c41f1ba..e6852c8510a 100644 --- a/notification/irc.py +++ b/notification/irc.py @@ -47,6 +47,12 @@ options: - The message body. required: true default: null + topic: + description: + - Set the channel topic + required: false + default: null + version_added: 2.0 color: description: - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). @@ -106,7 +112,7 @@ import ssl from time import sleep -def send_msg(channel, msg, server='localhost', port='6667', key=None, +def send_msg(channel, msg, server='localhost', port='6667', key=None, topic=None, nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False): '''send message to IRC''' @@ -163,6 +169,10 @@ def send_msg(channel, msg, server='localhost', port='6667', key=None, raise Exception('Timeout waiting for IRC JOIN response') sleep(0.5) + if topic is not None: + irc.send('TOPIC %s :%s\r\n' % (channel, topic)) + sleep(1) + irc.send('PRIVMSG %s :%s\r\n' % (channel, message)) sleep(1) irc.send('PART %s\r\n' % channel) @@ -186,6 +196,7 @@ def main(): "blue", "black", "none"]), channel=dict(required=True), key=dict(), + topic=dict(), passwd=dict(), timeout=dict(type='int', default=30), use_ssl=dict(type='bool', default=False) @@ -196,6 +207,7 @@ def main(): server = module.params["server"] port = module.params["port"] nick = module.params["nick"] + topic = module.params["topic"] msg = module.params["msg"] color = module.params["color"] channel = module.params["channel"] @@ -205,7 +217,7 @@ def main(): use_ssl = module.params["use_ssl"] try: - send_msg(channel, msg, server, port, key, nick, color, passwd, timeout, use_ssl) + send_msg(channel, msg, server, port, key, topic, nick, color, passwd, timeout, use_ssl) except Exception, e: module.fail_json(msg="unable to send to IRC: %s" % e) From 98abb6d2c9ee1e8ae4d88e3577edcd243622d685 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Tue, 9 Jun 2015 12:58:45 -0400 Subject: [PATCH 654/720] Adding author's github id --- monitoring/datadog_monitor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py index cb54cd32b5d..f1acb169ce0 100644 --- 
a/monitoring/datadog_monitor.py +++ b/monitoring/datadog_monitor.py @@ -34,7 +34,7 @@ description: - "Manages monitors within Datadog" - "Options like described on http://docs.datadoghq.com/api/" version_added: "2.0" -author: '"Sebastian Kornehl" ' +author: '"Sebastian Kornehl (@skornehl)" ' notes: [] requirements: [datadog] options: From f33bbe6e496e9308d06512dcd1741419077c0252 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 10 Jun 2015 13:00:02 +0200 Subject: [PATCH 655/720] puppet: update author to new format --- system/puppet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/puppet.py b/system/puppet.py index 83bbcbe6e18..336b2c81108 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -65,7 +65,7 @@ options: required: false default: None requirements: [ puppet ] -author: Monty Taylor +author: "Monty Taylor (@emonty)" ''' EXAMPLES = ''' From 0d7332d550f9896732e586cc17492f99724f748f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 10 Jun 2015 12:58:44 -0400 Subject: [PATCH 656/720] minor docfix --- monitoring/nagios.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 543f094b70e..0026751ea58 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -77,7 +77,7 @@ options: version_added: "2.0" description: - the Servicegroup we want to set downtimes/alerts for. - B(Required) option when using the C(servicegroup_service_downtime) amd C(servicegroup_host_downtime). + B(Required) option when using the C(servicegroup_service_downtime) amd C(servicegroup_host_downtime). command: description: - The raw command to send to nagios, which From c842c71708b3242ad9c15f4d0251fdbf9d0f2aaf Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 10 Jun 2015 23:31:48 +0200 Subject: [PATCH 657/720] cloudstack: add new module cs_network --- cloud/cloudstack/cs_network.py | 637 +++++++++++++++++++++++++++++++++ 1 file changed, 637 insertions(+) create mode 100644 cloud/cloudstack/cs_network.py diff --git a/cloud/cloudstack/cs_network.py b/cloud/cloudstack/cs_network.py new file mode 100644 index 00000000000..c8b3b32539d --- /dev/null +++ b/cloud/cloudstack/cs_network.py @@ -0,0 +1,637 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_network +short_description: Manages networks on Apache CloudStack based clouds. +description: + - Create, update, restart and delete networks. +version_added: '2.0' +author: '"René Moser (@resmo)" ' +options: + name: + description: + - Name (case sensitive) of the network. + required: true + displaytext: + description: + - Displaytext of the network. + - If not specified, C(name) will be used as displaytext. + required: false + default: null + network_offering: + description: + - Name of the offering for the network. + - Required if C(state=present). 
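Editorial aside on how these options are consumed: the module drives the CloudStack API through the 'cs' client, which exposes each API call as a method and reads its credentials from cloudstack.ini via read_config(). A minimal sketch of the raw calls the module wraps (the zone id and names below are illustrative placeholders):

from cs import CloudStack, read_config

cs = CloudStack(**read_config())
# Resolve the offering name to an id first, then create the network with the
# same arguments the options above map to (name, displaytext, zoneid, ...).
offerings = cs.listNetworkOfferings(zoneid='zone-uuid')
net = cs.createNetwork(
    name='my network',
    displaytext='my network',
    networkofferingid=offerings['networkoffering'][0]['id'],
    zoneid='zone-uuid',
)['network']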
+ required: false + default: null + start_ip: + description: + - The beginning IPv4 address of the network belongs to. + - Only considered on create. + required: false + default: null + end_ip: + description: + - The ending IPv4 address of the network belongs to. + - If not specified, value of C(start_ip) is used. + - Only considered on create. + required: false + default: null + gateway: + description: + - The gateway of the network. + - Required for shared networks and isolated networks when it belongs to VPC. + - Only considered on create. + required: false + default: null + netmask: + description: + - The netmask of the network. + - Required for shared networks and isolated networks when it belongs to VPC. + - Only considered on create. + required: false + default: null + start_ipv6: + description: + - The beginning IPv6 address of the network belongs to. + - Only considered on create. + required: false + default: null + end_ipv6: + description: + - The ending IPv6 address of the network belongs to. + - If not specified, value of C(start_ipv6) is used. + - Only considered on create. + required: false + default: null + cidr_ipv6: + description: + - CIDR of IPv6 network, must be at least /64. + - Only considered on create. + required: false + default: null + gateway_ipv6: + description: + - The gateway of the IPv6 network. + - Required for shared networks. + - Only considered on create. + required: false + default: null + vlan: + description: + - The ID or VID of the network. + required: false + default: null + vpc: + description: + - The ID or VID of the network. + required: false + default: null + isolated_pvlan: + description: + - The isolated private vlan for this network. + required: false + default: null + clean_up: + description: + - Cleanup old network elements. + - Only considered on C(state=restarted). + required: false + default: null + acl_type: + description: + - Access control type. + - Only considered on create. + required: false + default: account + choices: [ 'account', 'domain' ] + network_domain: + description: + - The network domain. + required: false + default: null + state: + description: + - State of the network. + required: false + default: present + choices: [ 'present', 'absent', 'restarted' ] + zone: + description: + - Name of the zone in which the network should be deployed. + - If not set, default zone is used. + required: false + default: null + project: + description: + - Name of the project the network to be deployed in. + required: false + default: null + domain: + description: + - Domain the network is related to. + required: false + default: null + account: + description: + - Account the network is related to. + required: false + default: null + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# create a network +- local_action: + module: cs_network + name: my network + zone: gva-01 + network_offering: DefaultIsolatedNetworkOfferingWithSourceNatService + network_domain: example.com + +# update a network +- local_action: + module: cs_network + name: my network + displaytext: network of domain example.local + network_domain: example.local + +# restart a network with clean up +- local_action: + module: cs_network + name: my network + clean_up: yes + state: restared + +# remove a network +- local_action: + module: cs_network + name: my network + state: absent +''' + +RETURN = ''' +--- +id: + description: ID of the network. 
+ returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the network. + returned: success + type: string + sample: web project +displaytext: + description: Display text of the network. + returned: success + type: string + sample: web project +dns1: + description: IP address of the 1st nameserver. + returned: success + type: string + sample: 1.2.3.4 +dns2: + description: IP address of the 2nd nameserver. + returned: success + type: string + sample: 1.2.3.4 +cidr: + description: IPv4 network CIDR. + returned: success + type: string + sample: 10.101.64.0/24 +gateway: + description: IPv4 gateway. + returned: success + type: string + sample: 10.101.64.1 +netmask: + description: IPv4 netmask. + returned: success + type: string + sample: 255.255.255.0 +cidr_ipv6: + description: IPv6 network CIDR. + returned: success + type: string + sample: 2001:db8::/64 +gateway_ipv6: + description: IPv6 gateway. + returned: success + type: string + sample: 2001:db8::1 +state: + description: State of the network. + returned: success + type: string + sample: Implemented +zone: + description: Name of zone. + returned: success + type: string + sample: ch-gva-2 +domain: + description: Domain the network is related to. + returned: success + type: string + sample: ROOT +account: + description: Account the network is related to. + returned: success + type: string + sample: example account +project: + description: Name of project. + returned: success + type: string + sample: Production +tags: + description: List of resource tags associated with the network. + returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +acl_type: + description: Access type of the network (Domain, Account). + returned: success + type: string + sample: Account +broadcast_domaintype: + description: Broadcast domain type of the network. + returned: success + type: string + sample: Vlan +type: + description: Type of the network. + returned: success + type: string + sample: Isolated +traffic_type: + description: Traffic type of the network. + returned: success + type: string + sample: Guest +state: + description: State of the network (Allocated, Implemented, Setup). + returned: success + type: string + sample: Allocated +is_persistent: + description: Whether the network is persistent or not. + returned: success + type: boolean + sample: false +network_domain: + description: The network domain + returned: success + type: string + sample: example.local +network_offering: + description: The network offering name. 
+ returned: success + type: string + sample: DefaultIsolatedNetworkOfferingWithSourceNatService +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackNetwork(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.network = None + + + def get_or_fallback(self, key=None, fallback_key=None): + value = self.module.params.get(key) + if not value: + value = self.module.params.get(fallback_key) + return value + + + def get_vpc(self, key=None): + vpc = self.module.params.get('vpc') + if not vpc: + return None + + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['zoneid'] = self.get_zone(key='id') + + vpcs = self.cs.listVPCs(**args) + if vpcs: + for v in vpcs['vpc']: + if vpc in [ v['name'], v['displaytext'], v['id'] ]: + return self._get_by_key(key, v) + self.module.fail_json(msg="VPC '%s' not found" % vpc) + + + def get_network_offering(self, key=None): + network_offering = self.module.params.get('network_offering') + if not network_offering: + self.module.fail_json(msg="missing required arguments: network_offering") + + args = {} + args['zoneid'] = self.get_zone(key='id') + + network_offerings = self.cs.listNetworkOfferings(**args) + if network_offerings: + for no in network_offerings['networkoffering']: + if network_offering in [ no['name'], no['displaytext'], no['id'] ]: + return self._get_by_key(key, no) + self.module.fail_json(msg="Network offering '%s' not found" % network_offering) + + + def _get_args(self): + args = {} + args['name'] = self.module.params.get('name') + args['displaytext'] = self.get_or_fallback('displaytext','name') + args['networkdomain'] = self.module.params.get('network_domain') + args['networkofferingid'] = self.get_network_offering(key='id') + return args + + + def get_network(self): + if not self.network: + network = self.module.params.get('name') + + args = {} + args['zoneid'] = self.get_zone(key='id') + args['projectid'] = self.get_project(key='id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + + networks = self.cs.listNetworks(**args) + if networks: + for n in networks['network']: + if network in [ n['name'], n['displaytext'], n['id']]: + self.network = n + break + return self.network + + + def present_network(self): + network = self.get_network() + if not network: + network = self.create_network(network) + else: + network = self.update_network(network) + return network + + + def update_network(self, network): + args = self._get_args() + args['id'] = network['id'] + + if self._has_changed(args, network): + self.result['changed'] = True + if not self.module.check_mode: + network = self.cs.updateNetwork(**args) + + if 'errortext' in network: + self.module.fail_json(msg="Failed: '%s'" % network['errortext']) + + poll_async = self.module.params.get('poll_async') + if network and poll_async: + network = self._poll_job(network, 'network') + return network + + + def create_network(self, network): + self.result['changed'] = True + + args = self._get_args() + args['acltype'] = self.module.params.get('acl_type') + args['zoneid'] = self.get_zone(key='id') + args['projectid'] = self.get_project(key='id') + args['account'] = self.get_account(key='name') + args['domainid'] = 
self.get_domain(key='id') + args['startip'] = self.module.params.get('start_ip') + args['endip'] = self.get_or_fallback('end_ip', 'start_ip') + args['netmask'] = self.module.params.get('netmask') + args['gateway'] = self.module.params.get('gateway') + args['startipv6'] = self.module.params.get('start_ipv6') + args['endipv6'] = self.get_or_fallback('end_ipv6', 'start_ipv6') + args['ip6cidr'] = self.module.params.get('cidr_ipv6') + args['ip6gateway'] = self.module.params.get('gateway_ipv6') + args['vlan'] = self.module.params.get('vlan') + args['isolatedpvlan'] = self.module.params.get('isolated_pvlan') + args['subdomainaccess'] = self.module.params.get('subdomain_access') + args['vpcid'] = self.get_vpc(key='id') + + if not self.module.check_mode: + res = self.cs.createNetwork(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + network = res['network'] + return network + + + def restart_network(self): + network = self.get_network() + + if not network: + self.module.fail_json(msg="No network named '%s' found." % self.module.params('name')) + + # Restarting only available for these states + if network['state'].lower() in [ 'implemented', 'setup' ]: + self.result['changed'] = True + + args = {} + args['id'] = network['id'] + args['cleanup'] = self.module.params.get('clean_up') + + if not self.module.check_mode: + network = self.cs.restartNetwork(**args) + + if 'errortext' in network: + self.module.fail_json(msg="Failed: '%s'" % network['errortext']) + + poll_async = self.module.params.get('poll_async') + if network and poll_async: + network = self._poll_job(network, 'network') + return network + + + def absent_network(self): + network = self.get_network() + if network: + self.result['changed'] = True + + args = {} + args['id'] = network['id'] + + if not self.module.check_mode: + res = self.cs.deleteNetwork(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'network') + return network + + + def get_result(self, network): + if network: + if 'id' in network: + self.result['id'] = network['id'] + if 'name' in network: + self.result['name'] = network['name'] + if 'displaytext' in network: + self.result['displaytext'] = network['displaytext'] + if 'dns1' in network: + self.result['dns1'] = network['dns1'] + if 'dns2' in network: + self.result['dns2'] = network['dns2'] + if 'cidr' in network: + self.result['cidr'] = network['cidr'] + if 'broadcastdomaintype' in network: + self.result['broadcast_domaintype'] = network['broadcastdomaintype'] + if 'netmask' in network: + self.result['netmask'] = network['netmask'] + if 'gateway' in network: + self.result['gateway'] = network['gateway'] + if 'ip6cidr' in network: + self.result['cidr_ipv6'] = network['ip6cidr'] + if 'ip6gateway' in network: + self.result['gateway_ipv6'] = network['ip6gateway'] + if 'state' in network: + self.result['state'] = network['state'] + if 'type' in network: + self.result['type'] = network['type'] + if 'traffictype' in network: + self.result['traffic_type'] = network['traffictype'] + if 'zone' in network: + self.result['zone'] = network['zonename'] + if 'domain' in network: + self.result['domain'] = network['domain'] + if 'account' in network: + self.result['account'] = network['account'] + if 'project' in network: + self.result['project'] = network['project'] + if 'acltype' in network: + self.result['acl_type'] = 
network['acltype'] + if 'networkdomain' in network: + self.result['network_domain'] = network['networkdomain'] + if 'networkofferingname' in network: + self.result['network_offering'] = network['networkofferingname'] + if 'ispersistent' in network: + self.result['is_persistent'] = network['ispersistent'] + if 'tags' in network: + self.result['tags'] = [] + for tag in network['tags']: + result_tag = {} + result_tag['key'] = tag['key'] + result_tag['value'] = tag['value'] + self.result['tags'].append(result_tag) + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + displaytext = dict(default=None), + network_offering = dict(default=None), + zone = dict(default=None), + start_ip = dict(default=None), + end_ip = dict(default=None), + gateway = dict(default=None), + netmask = dict(default=None), + start_ipv6 = dict(default=None), + end_ipv6 = dict(default=None), + cidr_ipv6 = dict(default=None), + gateway_ipv6 = dict(default=None), + vlan = dict(default=None), + vpc = dict(default=None), + isolated_pvlan = dict(default=None), + clean_up = dict(default=None), + network_domain = dict(default=None), + state = dict(choices=['present', 'absent', 'restarted' ], default='present'), + acl_type = dict(choices=['account', 'domain'], default='account'), + project = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + poll_async = dict(type='bool', choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None, no_log=True), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ['start_ip', 'netmask', 'gateway'], + ['start_ipv6', 'cidr_ipv6', 'gateway_ipv6'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_network = AnsibleCloudStackNetwork(module) + + state = module.params.get('state') + if state in ['absent']: + network = acs_network.absent_network() + + elif state in ['restarted']: + network = acs_network.restart_network() + + else: + network = acs_network.present_network() + + result = acs_network.get_result(network) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 51cf9a029a3967ba2bc84fb9aa25b2cb3c71c423 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 11 Jun 2015 11:36:34 -0500 Subject: [PATCH 658/720] Add new module 'expect' --- commands/__init__.py | 0 commands/expect.py | 189 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 189 insertions(+) create mode 100644 commands/__init__.py create mode 100644 commands/expect.py diff --git a/commands/__init__.py b/commands/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/commands/expect.py b/commands/expect.py new file mode 100644 index 00000000000..0922ba4e464 --- /dev/null +++ b/commands/expect.py @@ -0,0 +1,189 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Matt Martz +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either 
version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import datetime + +try: + import pexpect + HAS_PEXPECT = True +except ImportError: + HAS_PEXPECT = False + + +DOCUMENTATION = ''' +--- +module: expect +version_added: 2.0 +short_description: Executes a command and responds to prompts +description: + - The M(expect) module executes a command and responds to prompts + - The given command will be executed on all selected nodes. It will not be + processed through the shell, so variables like C($HOME) and operations + like C("<"), C(">"), C("|"), and C("&") will not work +options: + command: + description: + - the command module takes command to run. + required: true + creates: + description: + - a filename, when it already exists, this step will B(not) be run. + required: false + removes: + description: + - a filename, when it does not exist, this step will B(not) be run. + required: false + chdir: + description: + - cd into this directory before running the command + required: false + executable: + description: + - change the shell used to execute the command. Should be an absolute + path to the executable. + required: false + responses: + description: + - Mapping of expected string and string to respond with + required: true + timeout: + description: + - Amount of time in seconds to wait for the expected strings + default: 30 + echo: + description: + - Whether or not to echo out your response strings + default: false +requirements: + - python >= 2.6 + - pexpect >= 3.3 +notes: + - If you want to run a command through the shell (say you are using C(<), + C(>), C(|), etc), you must specify a shell in the command such as + C(/bin/bash -c "/path/to/something | grep else") +author: '"Matt Martz (@sivel)" ' +''' + +EXAMPLES = ''' +- expect: + command: passwd username + responses: + (?i)password: "MySekretPa$$word" +''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + command=dict(required=True), + chdir=dict(), + executable=dict(), + creates=dict(), + removes=dict(), + responses=dict(type='dict', required=True), + timeout=dict(type='int', default=30), + echo=dict(type='bool', default=False), + ) + ) + + if not HAS_PEXPECT: + module.fail_json(msg='The pexpect python module is required') + + chdir = module.params['chdir'] + executable = module.params['executable'] + args = module.params['command'] + creates = module.params['creates'] + removes = module.params['removes'] + responses = module.params['responses'] + timeout = module.params['timeout'] + echo = module.params['echo'] + + events = dict() + for key, value in responses.iteritems(): + events[key.decode()] = u'%s\n' % value.rstrip('\n').decode() + + if args.strip() == '': + module.fail_json(rc=256, msg="no command given") + + if chdir: + chdir = os.path.abspath(os.path.expanduser(chdir)) + os.chdir(chdir) + + if creates: + # do not run the command if the line contains creates=filename + # and the filename already exists. This allows idempotence + # of command executions. 
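        # (Editorial note: the same guard is applied to 'removes' just below; once both
        # checks pass, the module turns the 'responses' mapping into a pexpect 'events'
        # dict -- each pattern mapped to its reply plus a trailing newline -- and passes
        # it to pexpect.runu(), which answers every matching prompt automatically and
        # returns the command output together with its exit status.)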
+ v = os.path.expanduser(creates) + if os.path.exists(v): + module.exit_json( + cmd=args, + stdout="skipped, since %s exists" % v, + changed=False, + stderr=False, + rc=0 + ) + + if removes: + # do not run the command if the line contains removes=filename + # and the filename does not exist. This allows idempotence + # of command executions. + v = os.path.expanduser(removes) + if not os.path.exists(v): + module.exit_json( + cmd=args, + stdout="skipped, since %s does not exist" % v, + changed=False, + stderr=False, + rc=0 + ) + + startd = datetime.datetime.now() + + if executable: + cmd = '%s %s' % (executable, args) + else: + cmd = args + + try: + out, rc = pexpect.runu(cmd, timeout=timeout, withexitstatus=True, + events=events, cwd=chdir, echo=echo) + except pexpect.ExceptionPexpect, e: + module.fail_json(msg='%s' % e) + + endd = datetime.datetime.now() + delta = endd - startd + + if out is None: + out = '' + + module.exit_json( + cmd=args, + stdout=out.rstrip('\r\n'), + rc=rc, + start=str(startd), + end=str(endd), + delta=str(delta), + changed=True, + ) + +# import module snippets +from ansible.module_utils.basic import * + +main() From ae75c26f870b92880d801c0968578e18f7eeddbc Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 11 Jun 2015 12:36:47 -0500 Subject: [PATCH 659/720] Remove the executable option as it's redundant --- commands/expect.py | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/commands/expect.py b/commands/expect.py index 0922ba4e464..124c718b73b 100644 --- a/commands/expect.py +++ b/commands/expect.py @@ -54,11 +54,6 @@ options: description: - cd into this directory before running the command required: false - executable: - description: - - change the shell used to execute the command. Should be an absolute - path to the executable. 
- required: false responses: description: - Mapping of expected string and string to respond with @@ -94,7 +89,6 @@ def main(): argument_spec=dict( command=dict(required=True), chdir=dict(), - executable=dict(), creates=dict(), removes=dict(), responses=dict(type='dict', required=True), @@ -107,7 +101,6 @@ def main(): module.fail_json(msg='The pexpect python module is required') chdir = module.params['chdir'] - executable = module.params['executable'] args = module.params['command'] creates = module.params['creates'] removes = module.params['removes'] @@ -156,13 +149,8 @@ def main(): startd = datetime.datetime.now() - if executable: - cmd = '%s %s' % (executable, args) - else: - cmd = args - try: - out, rc = pexpect.runu(cmd, timeout=timeout, withexitstatus=True, + out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True, events=events, cwd=chdir, echo=echo) except pexpect.ExceptionPexpect, e: module.fail_json(msg='%s' % e) From ea0f0ec7d37493c0f87dc3feffa832a7932cb938 Mon Sep 17 00:00:00 2001 From: Alex Lo Date: Fri, 12 Jun 2015 00:49:37 -0400 Subject: [PATCH 660/720] remove extraneous imports --- cloud/amazon/cloudtrail.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index 6a1885d6ee7..d6ed254df91 100644 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -90,11 +90,6 @@ EXAMPLES = """ local_action: cloudtrail state=absent name=main region=us-east-1 """ -import time -import sys -import os -from collections import Counter - boto_import_failed = False try: import boto From a86c8ab02553a99d554d6e5630646b0ae5031319 Mon Sep 17 00:00:00 2001 From: Alex Lo Date: Fri, 12 Jun 2015 00:49:59 -0400 Subject: [PATCH 661/720] There is no absent, only disabled --- cloud/amazon/cloudtrail.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index d6ed254df91..eb445768ed5 100644 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -87,7 +87,7 @@ EXAMPLES = """ s3_key_prefix='' region=us-east-1 - name: remove cloudtrail - local_action: cloudtrail state=absent name=main region=us-east-1 + local_action: cloudtrail state=disabled name=main region=us-east-1 """ boto_import_failed = False From 59c3913e0bb6e3800297d270edc034a7952b26bb Mon Sep 17 00:00:00 2001 From: Alex Lo Date: Fri, 12 Jun 2015 00:50:27 -0400 Subject: [PATCH 662/720] Fix boto library checking --- cloud/amazon/cloudtrail.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index eb445768ed5..5a87f35e918 100644 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -90,13 +90,14 @@ EXAMPLES = """ local_action: cloudtrail state=disabled name=main region=us-east-1 """ -boto_import_failed = False +HAS_BOTO = False try: import boto import boto.cloudtrail from boto.regioninfo import RegionInfo + HAS_BOTO = True except ImportError: - boto_import_failed = True + HAS_BOTO = False class CloudTrailManager: """Handles cloudtrail configuration""" @@ -147,9 +148,6 @@ class CloudTrailManager: def main(): - if not has_libcloud: - module.fail_json(msg='boto is required.') - argument_spec = ec2_argument_spec() argument_spec.update(dict( state={'required': True, 'choices': ['enabled', 'disabled'] }, @@ -161,6 +159,10 @@ def main(): required_together = ( ['state', 's3_bucket_name'] ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, 
required_together=required_together) + + if not HAS_BOTO: + module.fail_json(msg='Alex sucks boto is required.') + ec2_url, access_key, secret_key, region = get_ec2_creds(module) aws_connect_params = dict(aws_access_key_id=access_key, aws_secret_access_key=secret_key) From 90b6f0fe68187f4067d716c3313d1fb837fb906c Mon Sep 17 00:00:00 2001 From: Alex Lo Date: Fri, 12 Jun 2015 01:31:45 -0400 Subject: [PATCH 663/720] Error message typo --- cloud/amazon/cloudtrail.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index 5a87f35e918..962473e6a9e 100644 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -161,7 +161,7 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together) if not HAS_BOTO: - module.fail_json(msg='Alex sucks boto is required.') + module.fail_json(msg='boto is required.') ec2_url, access_key, secret_key, region = get_ec2_creds(module) aws_connect_params = dict(aws_access_key_id=access_key, From 0c6e5b9eb4a443aeda5ac377518ade16172d2d82 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 12 Jun 2015 14:11:38 -0400 Subject: [PATCH 664/720] fixed doc issues --- network/nmcli.py | 71 ++++++++++++++++++++++++------------------------ 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/network/nmcli.py b/network/nmcli.py index 18f0ecbab1f..45043fd2807 100644 --- a/network/nmcli.py +++ b/network/nmcli.py @@ -25,6 +25,7 @@ module: nmcli author: Chris Long short_description: Manage Networking requirements: [ nmcli, dbus ] +version_added: "2.0" description: - Manage the network devices. Create, modify, and manage, ethernet, teams, bonds, vlans etc. options: @@ -39,11 +40,11 @@ options: choices: [ "yes", "no" ] description: - Whether the connection should start on boot. - - Whether the connection profile can be automatically activated ( default: yes) + - Whether the connection profile can be automatically activated conn_name: required: True description: - - Where conn_name will be the name used to call the connection. when not provided a default name is generated: [-][-] + - 'Where conn_name will be the name used to call the connection. when not provided a default name is generated: [-][-]' ifname: required: False default: conn_name @@ -60,9 +61,9 @@ options: mode: required: False choices: [ "balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb" ] - default: None + default: balence-rr description: - - This is the type of device or network connection that you wish to create for a bond, team or bridge. (NetworkManager default: balance-rr) + - This is the type of device or network connection that you wish to create for a bond, team or bridge. master: required: False default: None @@ -72,35 +73,35 @@ options: required: False default: None description: - - The IPv4 address to this interface using this format ie: "192.168.1.24/24" + - 'The IPv4 address to this interface using this format ie: "192.168.1.24/24"' gw4: required: False description: - - The IPv4 gateway for this interface using this format ie: "192.168.100.1" + - 'The IPv4 gateway for this interface using this format ie: "192.168.100.1"' dns4: required: False default: None description: - - A list of upto 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ['"8.8.8.8 8.8.4.4"'] + - 'A list of upto 3 dns servers, ipv4 format e.g. 
To add two IPv4 DNS server addresses: ["8.8.8.8 8.8.4.4"]' ip6: required: False default: None description: - - The IPv6 address to this interface using this format ie: "abbe::cafe" + - 'The IPv6 address to this interface using this format ie: "abbe::cafe"' gw6: required: False default: None description: - - The IPv6 gateway for this interface using this format ie: "2001:db8::1" + - 'The IPv6 gateway for this interface using this format ie: "2001:db8::1"' dns6: required: False description: - - A list of upto 3 dns servers, ipv6 format e.g. To add two IPv6 DNS server addresses: ['"2001:4860:4860::8888 2001:4860:4860::8844"'] + - 'A list of upto 3 dns servers, ipv6 format e.g. To add two IPv6 DNS server addresses: ["2001:4860:4860::8888 2001:4860:4860::8844"]' mtu: required: False - default: None + default: 1500 description: - - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created. (NetworkManager default: 1500) + - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created. - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband) primary: required: False @@ -109,24 +110,24 @@ options: - This is only used with bond and is the primary interface name (for "active-backup" mode), this is the usually the 'ifname' miimon: required: False - default: None + default: 100 description: - - This is only used with bond - miimon (NetworkManager default: 100) + - This is only used with bond - miimon downdelay: required: False default: None description: - - This is only used with bond - downdelay (NetworkManager default: 0) + - This is only used with bond - downdelay updelay: required: False default: None description: - - This is only used with bond - updelay (NetworkManager default: 0) + - This is only used with bond - updelay arp_interval: required: False default: None description: - - This is only used with bond - ARP interval (NetworkManager default: 0) + - This is only used with bond - ARP interval arp_ip_target: required: False default: None @@ -139,49 +140,49 @@ options: - This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge priority: required: False - default: None + default: 128 description: - - This is only used with 'bridge' - sets STP priority (NetworkManager default: 128) + - This is only used with 'bridge' - sets STP priority forwarddelay: required: False - default: None + default: 15 description: - - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds (NetworkManager default: 15) + - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds hellotime: required: False - default: None + default: 2 description: - - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds (NetworkManager default: 2) + - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds maxage: required: False - default: None + default: 20 description: - - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds (NetworkManager default: 20) + - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds ageingtime: required: False - default: None + default: 300 description: - - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds (NetworkManager default: 300) + - This is 
only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds mac: required: False default: None description: - - This is only used with bridge - MAC address of the bridge (note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel) + - 'This is only used with bridge - MAC address of the bridge (note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel)' slavepriority: required: False - default: None + default: 32 description: - - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave (default: 32) + - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave path_cost: required: False - default: None + default: 100 description: - - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave (NetworkManager default: 100) + - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave hairpin: required: False - default: None + default: yes description: - - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame was received on. (NetworkManager default: yes) + - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame was received on. vlanid: required: False default: None @@ -1066,4 +1067,4 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() \ No newline at end of file +main() From 68dc905b5fa57874dc4161c88ea981cb52485a36 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 12 Apr 2015 23:09:45 +0200 Subject: [PATCH 665/720] cloudstack: add new module cs_template --- cloud/cloudstack/cs_template.py | 633 ++++++++++++++++++++++++++++++++ 1 file changed, 633 insertions(+) create mode 100644 cloud/cloudstack/cs_template.py diff --git a/cloud/cloudstack/cs_template.py b/cloud/cloudstack/cs_template.py new file mode 100644 index 00000000000..48f00fad553 --- /dev/null +++ b/cloud/cloudstack/cs_template.py @@ -0,0 +1,633 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_template +short_description: Manages templates on Apache CloudStack based clouds. +description: + - Register a template from URL, create a template from a ROOT volume of a stopped VM or its snapshot and delete templates. +version_added: '2.0' +author: '"René Moser (@resmo)" ' +options: + name: + description: + - Name of the template. + required: true + url: + description: + - URL of where the template is hosted. + - Mutually exclusive with C(vm). + required: false + default: null + vm: + description: + - VM name the template will be created from its volume or alternatively from a snapshot. 
+ - VM must be in stopped state if created from its volume. + - Mutually exclusive with C(url). + required: false + default: null + snapshot: + description: + - Name of the snapshot, created from the VM ROOT volume, the template will be created from. + - C(vm) is required together with this argument. + required: false + default: null + os_type: + description: + - OS type that best represents the OS of this template. + required: false + default: null + checksum: + description: + - The MD5 checksum value of this template. + - If set, we search by checksum instead of name. + required: false + default: false + is_ready: + description: + - This flag is used for searching existing templates. + - If set to C(true), it will only list template ready for deployment e.g. successfully downloaded and installed. + - Recommended to set it to C(false). + required: false + default: false + is_public: + description: + - Register the template to be publicly available to all users. + - Only used if C(state) is present. + required: false + default: false + is_featured: + description: + - Register the template to be featured. + - Only used if C(state) is present. + required: false + default: false + is_dynamically_scalable: + description: + - Register the template having XS/VMWare tools installed in order to support dynamic scaling of VM CPU/memory. + - Only used if C(state) is present. + required: false + default: false + project: + description: + - Name of the project the template to be registered in. + required: false + default: null + zone: + description: + - Name of the zone you wish the template to be registered or deleted from. + - If not specified, first found zone will be used. + required: false + default: null + template_filter: + description: + - Name of the filter used to search for the template. + required: false + default: 'self' + choices: [ 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community' ] + hypervisor: + description: + - Name the hypervisor to be used for creating the new template. + - Relevant when using C(state=present). + required: false + default: none + choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] + requires_hvm: + description: + - true if this template requires HVM. + required: false + default: false + password_enabled: + description: + - True if the template supports the password reset feature. + required: false + default: false + template_tag: + description: + - the tag for this template. + required: false + default: null + sshkey_enabled: + description: + - True if the template supports the sshkey upload feature. + required: false + default: false + is_routing: + description: + - True if the template type is routing i.e., if template is used to deploy router. + - Only considered if C(url) is used. + required: false + default: false + format: + description: + - The format for the template. + - Relevant when using C(state=present). + required: false + default: null + choices: [ 'QCOW2', 'RAW', 'VHD', 'OVA' ] + is_extractable: + description: + - True if the template or its derivatives are extractable. + required: false + default: false + details: + description: + - Template details in key/value pairs. + required: false + default: null + bits: + description: + - 32 or 64 bits support. + required: false + default: '64' + displaytext: + description: + - the display text of the template. + required: true + default: null + state: + description: + - State of the template. 
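A side note on the checksum option above: when it is set, the module matches an existing template by MD5 rather than by name. A minimal sketch of that lookup, assuming 'templates' is the dict returned by listTemplates():

def find_by_checksum(templates, checksum):
    # Walk the returned templates and pick the one whose checksum matches.
    for tpl in templates.get('template', []):
        if tpl.get('checksum') == checksum:
            return tpl
    return None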
+ required: false + default: 'present' + choices: [ 'present', 'absent' ] + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Register a systemvm template +- local_action: + module: cs_template + name: systemvm-4.5 + url: "http://packages.shapeblue.com/systemvmtemplate/4.5/systemvm64template-4.5-vmware.ova" + hypervisor: VMware + format: OVA + zone: tokio-ix + os_type: Debian GNU/Linux 7(64-bit) + is_routing: yes + +# Create a template from a stopped virtual machine's volume +- local_action: + module: cs_template + name: debian-base-template + vm: debian-base-vm + os_type: Debian GNU/Linux 7(64-bit) + zone: tokio-ix + password_enabled: yes + is_public: yes + +# Create a template from a virtual machine's root volume snapshot +- local_action: + module: cs_template + name: debian-base-template + vm: debian-base-vm + snapshot: ROOT-233_2015061509114 + os_type: Debian GNU/Linux 7(64-bit) + zone: tokio-ix + password_enabled: yes + is_public: yes + +# Remove a template +- local_action: + module: cs_template + name: systemvm-4.2 + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the template. + returned: success + type: string + sample: Debian 7 64-bit +displaytext: + description: Displaytext of the template. + returned: success + type: string + sample: Debian 7.7 64-bit minimal 2015-03-19 +checksum: + description: MD5 checksum of the template. + returned: success + type: string + sample: 0b31bccccb048d20b551f70830bb7ad0 +status: + description: Status of the template. + returned: success + type: string + sample: Download Complete +is_ready: + description: True if the template is ready to be deployed from. + returned: success + type: boolean + sample: true +is_public: + description: True if the template is public. + returned: success + type: boolean + sample: true +is_featured: + description: True if the template is featured. + returned: success + type: boolean + sample: true +is_extractable: + description: True if the template is extractable. + returned: success + type: boolean + sample: true +format: + description: Format of the template. + returned: success + type: string + sample: OVA +os_type: + description: Typo of the OS. + returned: success + type: string + sample: CentOS 6.5 (64-bit) +password_enabled: + description: True if the reset password feature is enabled, false otherwise. + returned: success + type: boolean + sample: false +sshkey_enabled: + description: true if template is sshkey enabled, false otherwise. + returned: success + type: boolean + sample: false +cross_zones: + description: true if the template is managed across all zones, false otherwise. + returned: success + type: boolean + sample: false +template_type: + description: Type of the template. + returned: success + type: string + sample: USER +created: + description: Date of registering. + returned: success + type: string + sample: 2015-03-29T14:57:06+0200 +template_tag: + description: Template tag related to this template. + returned: success + type: string + sample: special +hypervisor: + description: Hypervisor related to this template. + returned: success + type: string + sample: VMware +tags: + description: List of resource tags associated with the template. + returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +zone: + description: Name of zone the template is registered in. 
+ returned: success + type: string + sample: zuerich +domain: + description: Domain the template is related to. + returned: success + type: string + sample: example domain +account: + description: Account the template is related to. + returned: success + type: string + sample: example account +project: + description: Name of project the template is related to. + returned: success + type: string + sample: Production +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackTemplate(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + + + def _get_args(self): + args = {} + args['name'] = self.module.params.get('name') + args['displaytext'] = self.module.params.get('displaytext') + args['bits'] = self.module.params.get('bits') + args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable') + args['isextractable'] = self.module.params.get('is_extractable') + args['isfeatured'] = self.module.params.get('is_featured') + args['ispublic'] = self.module.params.get('is_public') + args['passwordenabled'] = self.module.params.get('password_enabled') + args['requireshvm'] = self.module.params.get('requires_hvm') + args['templatetag'] = self.module.params.get('template_tag') + args['ostypeid'] = self.get_os_type(key='id') + + if not args['ostypeid']: + self.module.fail_json(msg="Missing required arguments: os_type") + + if not args['displaytext']: + args['displaytext'] = self.module.params.get('name') + return args + + + def get_root_volume(self, key=None): + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['virtualmachineid'] = self.get_vm(key='id') + args['type'] = "ROOT" + + volumes = self.cs.listVolumes(**args) + if volumes: + return self._get_by_key(key, volumes['volume'][0]) + self.module.fail_json(msg="Root volume for '%s' not found" % self.get_vm('name')) + + + def get_snapshot(self, key=None): + snapshot = self.module.params.get('snapshot') + if not snapshot: + return None + + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['volumeid'] = self.get_root_volume('id') + snapshots = self.cs.listSnapshots(**args) + if snapshots: + for s in snapshots['snapshot']: + if snapshot in [ s['name'], s['id'] ]: + return self._get_by_key(key, s) + self.module.fail_json(msg="Snapshot '%s' not found" % snapshot) + + + def create_template(self): + template = self.get_template() + if not template: + self.result['changed'] = True + + args = self._get_args() + snapshot_id = self.get_snapshot(key='id') + if snapshot_id: + args['snapshotid'] = snapshot_id + else: + args['volumeid'] = self.get_root_volume('id') + + if not self.module.check_mode: + template = self.cs.createTemplate(**args) + + if 'errortext' in template: + self.module.fail_json(msg="Failed: '%s'" % template['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + template = self._poll_job(template, 'template') + return template + + + def register_template(self): + template = self.get_template() + if not template: + self.result['changed'] = True + args = self._get_args() + args['url'] = self.module.params.get('url') + args['format'] = 
self.module.params.get('format') + args['checksum'] = self.module.params.get('checksum') + args['isextractable'] = self.module.params.get('is_extractable') + args['isrouting'] = self.module.params.get('is_routing') + args['sshkeyenabled'] = self.module.params.get('sshkey_enabled') + args['hypervisor'] = self.get_hypervisor() + args['zoneid'] = self.get_zone(key='id') + args['domainid'] = self.get_domain(key='id') + args['account'] = self.get_account(key='name') + args['projectid'] = self.get_project(key='id') + + if not self.module.check_mode: + res = self.cs.registerTemplate(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + template = res['template'] + return template + + + def get_template(self): + args = {} + args['isready'] = self.module.params.get('is_ready') + args['templatefilter'] = self.module.params.get('template_filter') + args['zoneid'] = self.get_zone(key='id') + args['domainid'] = self.get_domain(key='id') + args['account'] = self.get_account(key='name') + args['projectid'] = self.get_project(key='id') + + # if checksum is set, we only look on that. + checksum = self.module.params.get('checksum') + if not checksum: + args['name'] = self.module.params.get('name') + + templates = self.cs.listTemplates(**args) + if templates: + # if checksum is set, we only look on that. + if not checksum: + return templates['template'][0] + else: + for i in templates['template']: + if i['checksum'] == checksum: + return i + return None + + + def remove_template(self): + template = self.get_template() + if template: + self.result['changed'] = True + + args = {} + args['id'] = template['id'] + args['zoneid'] = self.get_zone(key='id') + + if not self.module.check_mode: + res = self.cs.deleteTemplate(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self._poll_job(res, 'template') + return template + + + def get_result(self, template): + if template: + if 'displaytext' in template: + self.result['displaytext'] = template['displaytext'] + if 'name' in template: + self.result['name'] = template['name'] + if 'hypervisor' in template: + self.result['hypervisor'] = template['hypervisor'] + if 'zonename' in template: + self.result['zone'] = template['zonename'] + if 'checksum' in template: + self.result['checksum'] = template['checksum'] + if 'format' in template: + self.result['format'] = template['format'] + if 'isready' in template: + self.result['is_ready'] = template['isready'] + if 'ispublic' in template: + self.result['is_public'] = template['ispublic'] + if 'isfeatured' in template: + self.result['is_featured'] = template['isfeatured'] + if 'isextractable' in template: + self.result['is_extractable'] = template['isextractable'] + # and yes! it is really camelCase! 
+ if 'crossZones' in template: + self.result['cross_zones'] = template['crossZones'] + if 'ostypename' in template: + self.result['os_type'] = template['ostypename'] + if 'templatetype' in template: + self.result['template_type'] = template['templatetype'] + if 'passwordenabled' in template: + self.result['password_enabled'] = template['passwordenabled'] + if 'sshkeyenabled' in template: + self.result['sshkey_enabled'] = template['sshkeyenabled'] + if 'status' in template: + self.result['status'] = template['status'] + if 'created' in template: + self.result['created'] = template['created'] + if 'templatetag' in template: + self.result['template_tag'] = template['templatetag'] + if 'tags' in template: + self.result['tags'] = [] + for tag in template['tags']: + result_tag = {} + result_tag['key'] = tag['key'] + result_tag['value'] = tag['value'] + self.result['tags'].append(result_tag) + if 'domain' in template: + self.result['domain'] = template['domain'] + if 'account' in template: + self.result['account'] = template['account'] + if 'project' in template: + self.result['project'] = template['project'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + displaytext = dict(default=None), + url = dict(default=None), + vm = dict(default=None), + snapshot = dict(default=None), + os_type = dict(default=None), + is_ready = dict(type='bool', choices=BOOLEANS, default=False), + is_public = dict(type='bool', choices=BOOLEANS, default=True), + is_featured = dict(type='bool', choices=BOOLEANS, default=False), + is_dynamically_scalable = dict(type='bool', choices=BOOLEANS, default=False), + is_extractable = dict(type='bool', choices=BOOLEANS, default=False), + is_routing = dict(type='bool', choices=BOOLEANS, default=False), + checksum = dict(default=None), + template_filter = dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']), + hypervisor = dict(choices=['KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM'], default=None), + requires_hvm = dict(type='bool', choices=BOOLEANS, default=False), + password_enabled = dict(type='bool', choices=BOOLEANS, default=False), + template_tag = dict(default=None), + sshkey_enabled = dict(type='bool', choices=BOOLEANS, default=False), + format = dict(choices=['QCOW2', 'RAW', 'VHD', 'OVA'], default=None), + details = dict(default=None), + bits = dict(type='int', choices=[ 32, 64 ], default=64), + state = dict(choices=['present', 'absent'], default='present'), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(type='bool', choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(choices=['get', 'post'], default='get'), + api_timeout = dict(type='int', default=10), + ), + mutually_exclusive = ( + ['url', 'vm'], + ), + required_together = ( + ['api_key', 'api_secret', 'api_url'], + ['format', 'url', 'hypervisor'], + ), + required_one_of = ( + ['url', 'vm'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_tpl = AnsibleCloudStackTemplate(module) + + state = module.params.get('state') + if state in ['absent']: + tpl = acs_tpl.remove_template() + else: + url = module.params.get('url') + if url: + tpl = acs_tpl.register_template() + else: + 
tpl = acs_tpl.create_template() + + result = acs_tpl.get_result(tpl) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + except Exception, e: + module.fail_json(msg='Exception: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From ad845a59b0df3de044810845d15133d0d1fe214f Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 15 Jun 2015 12:12:49 +0200 Subject: [PATCH 666/720] cloudstack: fix clean_up arg to be boolean in cs_network --- cloud/cloudstack/cs_network.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/cloudstack/cs_network.py b/cloud/cloudstack/cs_network.py index c8b3b32539d..e22eaf0a5c3 100644 --- a/cloud/cloudstack/cs_network.py +++ b/cloud/cloudstack/cs_network.py @@ -116,7 +116,7 @@ options: - Cleanup old network elements. - Only considered on C(state=restarted). required: false - default: null + default: false acl_type: description: - Access control type. @@ -584,7 +584,7 @@ def main(): vlan = dict(default=None), vpc = dict(default=None), isolated_pvlan = dict(default=None), - clean_up = dict(default=None), + clean_up = dict(type='bool', choices=BOOLEANS, default=False), network_domain = dict(default=None), state = dict(choices=['present', 'absent', 'restarted' ], default='present'), acl_type = dict(choices=['account', 'domain'], default='account'), From 59c57ee798d4733186bd50c3179db9d0f744c918 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Tue, 16 Jun 2015 11:32:48 -0400 Subject: [PATCH 667/720] Changing maintainer for this module --- cloud/amazon/cloudtrail.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index 962473e6a9e..1c9313bbf7b 100644 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -21,7 +21,9 @@ short_description: manage CloudTrail creation and deletion description: - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. version_added: "2.0" -author: "Ted Timmons (@tedder)" +author: + - "Ansible Core Team" + - "Ted Timmons" requirements: - "boto >= 2.21" options: From d831c6a924dab2eb5eba965d489ea37fda217453 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Tue, 16 Jun 2015 11:41:31 -0400 Subject: [PATCH 668/720] Adding author info --- cloud/amazon/ec2_win_password.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_win_password.py b/cloud/amazon/ec2_win_password.py index 33a6ae7f947..b9cb029499a 100644 --- a/cloud/amazon/ec2_win_password.py +++ b/cloud/amazon/ec2_win_password.py @@ -7,7 +7,7 @@ short_description: gets the default administrator password for ec2 windows insta description: - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto. 
 version_added: "2.0"
-author: Rick Mendes
+author: "Rick Mendes (@rickmendes)"
 options:
   instance_id:
     description:

From dc519fb848c13c3347d1c6c441ba843581eb1fdf Mon Sep 17 00:00:00 2001
From: Greg DeKoenigsberg
Date: Tue, 16 Jun 2015 12:30:47 -0400
Subject: [PATCH 669/720] Add author data

---
 network/nmcli.py          | 2 +-
 windows/win_chocolatey.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/network/nmcli.py b/network/nmcli.py
index 45043fd2807..c674114a32e 100644
--- a/network/nmcli.py
+++ b/network/nmcli.py
@@ -22,7 +22,7 @@
 DOCUMENTATION='''
 ---
 module: nmcli
-author: Chris Long
+author: "Chris Long (@alcamie101)"
 short_description: Manage Networking
 requirements: [ nmcli, dbus ]
 version_added: "2.0"
diff --git a/windows/win_chocolatey.py b/windows/win_chocolatey.py
index fe00f2e0f6a..7f399dbd22f 100644
--- a/windows/win_chocolatey.py
+++ b/windows/win_chocolatey.py
@@ -75,7 +75,7 @@ options:
     require: false
     default: null
     aliases: []
-author: Trond Hindenes, Peter Mounce, Pepe Barbe, Adam Keech
+author: "Trond Hindenes (@trondhindenes), Peter Mounce (@petemounce), Pepe Barbe (@elventear), Adam Keech (@smadam813)"
 '''

 # TODO:

From 728f2f1bb84ceb278f32f9131019626f361c4218 Mon Sep 17 00:00:00 2001
From: Greg DeKoenigsberg
Date: Tue, 16 Jun 2015 12:55:51 -0400
Subject: [PATCH 670/720] Adding the list of valid module reviewers

---
 REVIEWERS.md | 160 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 160 insertions(+)
 create mode 100644 REVIEWERS.md

diff --git a/REVIEWERS.md b/REVIEWERS.md
new file mode 100644
index 00000000000..985afce7bea
--- /dev/null
+++ b/REVIEWERS.md
@@ -0,0 +1,160 @@
+New module reviewers
+====================
+The following list represents all current GitHub module reviewers. It's currently made up of all Ansible module authors, past and present.
+
+Two +1 votes by any of these module reviewers on a new module pull request will result in the inclusion of that module into Ansible Extras.
+
+Active
+======
+"Adam Garside (@fabulops)"
+"Adam Keech (@smadam813)"
+"Adam Miller (@maxamillion)"
+"Alex Coomans (@drcapulet)"
+"Alexander Bulimov (@abulimov)"
+"Alexander Saltanov (@sashka)"
+"Alexander Winkler (@dermute)"
+"Andrew de Quincey (@adq)"
+"André Paramés (@andreparames)"
+"Andy Hill (@andyhky)"
+"Artūras `arturaz` Šlajus (@arturaz)"
+"Augustus Kling (@AugustusKling)"
+"BOURDEL Paul (@pb8226)"
+"Balazs Pocze (@banyek)"
+"Ben Whaley (@bwhaley)"
+"Benno Joy (@bennojoy)"
+"Bernhard Weitzhofer (@b6d)"
+"Boyd Adamson (@brontitall)"
+"Brad Olson (@bradobro)"
+"Brian Coca (@bcoca)"
+"Brice Burgess (@briceburg)"
+"Bruce Pennypacker (@bpennypacker)"
+"Carson Gee (@carsongee)"
+"Chris Church (@cchurch)"
+"Chris Hoffman (@chrishoffman)"
+"Chris Long (@alcamie101)"
+"Chris Schmidt (@chrisisbeef)"
+"Christian Berendt (@berendt)"
+"Christopher H.
Laco (@claco)" +"Cristian van Ee (@DJMuggs)" +"Dag Wieers (@dagwieers)" +"Dane Summers (@dsummersl)" +"Daniel Jaouen (@danieljaouen)" +"Daniel Schep (@dschep)" +"Dariusz Owczarek (@dareko)" +"Darryl Stoflet (@dstoflet)" +"David CHANIAL (@davixx)" +"David Stygstra (@stygstra)" +"Derek Carter (@goozbach)" +"Dimitrios Tydeas Mengidis (@dmtrs)" +"Doug Luce (@dougluce)" +"Dylan Martin (@pileofrogs)" +"Elliott Foster (@elliotttf)" +"Eric Johnson (@erjohnso)" +"Evan Duffield (@scicoin-project)" +"Evan Kaufman (@EvanK)" +"Evgenii Terechkov (@evgkrsk)" +"Franck Cuny (@franckcuny)" +"Gareth Rushgrove (@garethr)" +"Hagai Kariti (@hkariti)" +"Hector Acosta (@hacosta)" +"Hiroaki Nakamura (@hnakamur)" +"Ivan Vanderbyl (@ivanvanderbyl)" +"Jakub Jirutka (@jirutka)" +"James Cammarata (@jimi-c)" +"James Laska (@jlaska)" +"James S. Martin (@jsmartin)" +"Jan-Piet Mens (@jpmens)" +"Jayson Vantuyl (@jvantuyl)" +"Jens Depuydt (@jensdepuydt)" +"Jeroen Hoekx (@jhoekx)" +"Jesse Keating (@j2sol)" +"Jim Dalton (@jsdalton)" +"Jim Richardson (@weaselkeeper)" +"Jimmy Tang (@jcftang)" +"Johan Wiren (@johanwiren)" +"John Dewey (@retr0h)" +"John Jarvis (@jarv)" +"John Whitbeck (@jwhitbeck)" +"Jon Hawkesworth (@jhawkesworth)" +"Jonas Pfenniger (@zimbatm)" +"Jonathan I. Davila (@defionscode)" +"Joseph Callen (@jcpowermac)" +"Kevin Carter (@cloudnull)" +"Lester Wade (@lwade)" +"Lorin Hochstein (@lorin)" +"Manuel Sousa (@manuel-sousa)" +"Mark Theunissen (@marktheunissen)" +"Matt Coddington (@mcodd)" +"Matt Hite (@mhite)" +"Matt Makai (@makaimc)" +"Matt Martz (@sivel)" +"Matt Wright (@mattupstate)" +"Matthew Vernon (@mcv21)" +"Matthew Williams (@mgwilliams)" +"Matthias Vogelgesang (@matze)" +"Max Riveiro (@kavu)" +"Michael Gregson (@mgregson)" +"Michael J. Schultz (@mjschultz)" +"Michael Warkentin (@mwarkentin)" +"Mischa Peters (@mischapeters)" +"Monty Taylor (@emonty)" +"Nandor Sivok (@dominis)" +"Nate Coraor (@natefoo)" +"Nate Kingsley (@nate-kingsley)" +"Nick Harring (@NickatEpic)" +"Patrick Callahan (@dirtyharrycallahan)" +"Patrick Ogenstad (@ogenstad)" +"Patrick Pelletier (@skinp)" +"Patrik Lundin (@eest)" +"Paul Durivage (@angstwad)" +"Pavel Antonov (@softzilla)" +"Pepe Barbe (@elventear)" +"Peter Mounce (@petemounce)" +"Peter Oliver (@mavit)" +"Peter Sprygada (@privateip)" +"Peter Tan (@tanpeter)" +"Philippe Makowski (@pmakowski)" +"Phillip Gentry, CX Inc (@pcgentry)" +"Quentin Stafford-Fraser (@quentinsf)" +"Ramon de la Fuente (@ramondelafuente)" +"Raul Melo (@melodous)" +"Ravi Bhure (@ravibhure)" +"René Moser (@resmo)" +"Richard Hoop (@rhoop) " +"Richard Isaacson (@risaacson)" +"Rick Mendes (@rickmendes)" +"Romeo Theriault (@romeotheriault)" +"Scott Anderson (@tastychutney)" +"Sebastian Kornehl (@skornehl)" +"Serge van Ginderachter (@srvg)" +"Sergei Antipov (@UnderGreen)" +"Seth Edwards (@sedward)" +"Silviu Dicu (@silviud) " +"Simon JAILLET (@jails)" +"Stephen Fromm (@sfromm)" +"Steve (@groks)" +"Steve Gargan (@sgargan)" +"Steve Smith (@tarka)" +"Takashi Someda (@tksmd)" +"Taneli Leppä (@rosmo)" +"Tim Bielawa (@tbielawa)" +"Tim Bielawa (@tbielawa)" +"Tim Mahoney (@timmahoney)" +"Timothy Appnel (@tima)" +"Tom Bamford (@tombamford)" +"Trond Hindenes (@trondhindenes)" +"Vincent Van der Kussen (@vincentvdk)" +"Vincent Viallet (@zbal)" +"WAKAYAMA Shirou (@shirou)" +"Will Thames (@willthames)" +"Willy Barro (@willybarro)" +"Xabier Larrakoetxea (@slok)" +"Yeukhon Wong (@yeukhon)" +"Zacharie Eakin (@zeekin)" +"berenddeboer (@berenddeboer)" +"bleader (@bleader)" +"curtis (@ccollicutt)" + +Retired +======= +None yet :) From 
004dedba8a8c4ba3c118744c37e5d5c238315345 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Tue, 16 Jun 2015 14:32:39 -0400 Subject: [PATCH 671/720] Changes to author formatting, remove emails --- REVIEWERS.md | 4 ++-- cloud/amazon/ec2_eni_facts.py | 2 +- cloud/cloudstack/cs_account.py | 2 +- cloud/cloudstack/cs_affinitygroup.py | 2 +- cloud/cloudstack/cs_firewall.py | 2 +- cloud/cloudstack/cs_instance.py | 2 +- cloud/cloudstack/cs_instancegroup.py | 2 +- cloud/cloudstack/cs_iso.py | 2 +- cloud/cloudstack/cs_network.py | 2 +- cloud/cloudstack/cs_portforward.py | 2 +- cloud/cloudstack/cs_project.py | 2 +- cloud/cloudstack/cs_securitygroup.py | 2 +- cloud/cloudstack/cs_securitygroup_rule.py | 2 +- cloud/cloudstack/cs_sshkeypair.py | 2 +- cloud/cloudstack/cs_template.py | 2 +- cloud/cloudstack/cs_vmsnapshot.py | 2 +- cloud/google/gce_img.py | 2 +- cloud/lxc/lxc_container.py | 2 +- cloud/misc/ovirt.py | 2 +- cloud/misc/virt.py | 4 ++-- cloud/vmware/vmware_datacenter.py | 2 +- clustering/consul.py | 2 +- clustering/consul_acl.py | 2 +- clustering/consul_kv.py | 2 +- clustering/consul_session.py | 2 +- commands/expect.py | 2 +- database/misc/mongodb_user.py | 2 +- database/misc/riak.py | 4 ++-- database/mysql/mysql_replication.py | 2 +- files/patch.py | 4 ++-- messaging/rabbitmq_binding.py | 2 +- messaging/rabbitmq_exchange.py | 2 +- messaging/rabbitmq_policy.py | 2 +- messaging/rabbitmq_queue.py | 2 +- monitoring/airbrake_deployment.py | 2 +- monitoring/boundary_meter.py | 2 +- monitoring/datadog_event.py | 2 +- monitoring/datadog_monitor.py | 2 +- monitoring/logentries.py | 2 +- monitoring/monit.py | 2 +- monitoring/nagios.py | 2 +- monitoring/newrelic_deployment.py | 2 +- monitoring/rollbar_deployment.py | 2 +- monitoring/zabbix_maintenance.py | 2 +- network/a10/a10_server.py | 2 +- network/a10/a10_service_group.py | 2 +- network/a10/a10_virtual_server.py | 2 +- network/citrix/netscaler.py | 2 +- network/f5/bigip_facts.py | 2 +- network/f5/bigip_monitor_http.py | 2 +- network/f5/bigip_monitor_tcp.py | 2 +- network/f5/bigip_node.py | 2 +- network/f5/bigip_pool.py | 2 +- network/f5/bigip_pool_member.py | 2 +- network/haproxy.py | 2 +- network/openvswitch_bridge.py | 2 +- network/openvswitch_port.py | 2 +- notification/campfire.py | 2 +- notification/flowdock.py | 2 +- notification/grove.py | 2 +- notification/mail.py | 2 +- notification/nexmo.py | 2 +- notification/pushover.py | 2 +- notification/sendgrid.py | 2 +- notification/slack.py | 2 +- notification/sns.py | 2 +- notification/twilio.py | 2 +- notification/typetalk.py | 2 +- packaging/language/bower.py | 2 +- packaging/language/composer.py | 2 +- packaging/language/cpanm.py | 2 +- packaging/language/maven_artifact.py | 2 +- packaging/language/npm.py | 2 +- packaging/os/dnf.py | 2 +- packaging/os/homebrew.py | 4 ++-- packaging/os/homebrew_cask.py | 2 +- packaging/os/homebrew_tap.py | 2 +- packaging/os/layman.py | 2 +- packaging/os/openbsd_pkg.py | 2 +- packaging/os/opkg.py | 2 +- packaging/os/pkg5.py | 2 +- packaging/os/pkg5_publisher.py | 2 +- packaging/os/pkgin.py | 4 ++-- packaging/os/pkgng.py | 2 +- packaging/os/pkgutil.py | 2 +- packaging/os/portinstall.py | 2 +- packaging/os/swdepot.py | 2 +- packaging/os/urpmi.py | 2 +- packaging/os/zypper.py | 2 +- packaging/os/zypper_repository.py | 2 +- source_control/bzr.py | 2 +- source_control/github_hooks.py | 2 +- system/alternatives.py | 4 ++-- system/at.py | 2 +- system/capabilities.py | 2 +- system/crypttab.py | 2 +- system/filesystem.py | 2 +- system/firewalld.py | 2 +- 
system/gluster_volume.py | 2 +- system/kernel_blacklist.py | 2 +- system/known_hosts.py | 2 +- system/lvg.py | 2 +- system/lvol.py | 4 ++-- system/modprobe.py | 6 +++--- system/open_iscsi.py | 2 +- system/ufw.py | 6 +++--- system/zfs.py | 2 +- web_infrastructure/ejabberd_user.py | 2 +- web_infrastructure/jboss.py | 2 +- web_infrastructure/jira.py | 2 +- windows/win_updates.py | 2 +- 111 files changed, 123 insertions(+), 123 deletions(-) diff --git a/REVIEWERS.md b/REVIEWERS.md index 985afce7bea..5ae08b59b02 100644 --- a/REVIEWERS.md +++ b/REVIEWERS.md @@ -120,7 +120,7 @@ Active "Raul Melo (@melodous)" "Ravi Bhure (@ravibhure)" "René Moser (@resmo)" -"Richard Hoop (@rhoop) " +"Richard Hoop (@rhoop)" "Richard Isaacson (@risaacson)" "Rick Mendes (@rickmendes)" "Romeo Theriault (@romeotheriault)" @@ -129,7 +129,7 @@ Active "Serge van Ginderachter (@srvg)" "Sergei Antipov (@UnderGreen)" "Seth Edwards (@sedward)" -"Silviu Dicu (@silviud) " +"Silviu Dicu (@silviud)" "Simon JAILLET (@jails)" "Stephen Fromm (@sfromm)" "Steve (@groks)" diff --git a/cloud/amazon/ec2_eni_facts.py b/cloud/amazon/ec2_eni_facts.py index 94b586fb639..76347c84261 100644 --- a/cloud/amazon/ec2_eni_facts.py +++ b/cloud/amazon/ec2_eni_facts.py @@ -20,7 +20,7 @@ short_description: Gather facts about ec2 ENI interfaces in AWS description: - Gather facts about ec2 ENI interfaces in AWS version_added: "2.0" -author: Rob White, wimnat [at] gmail.com, @wimnat +author: "Rob White (@wimnat)" options: eni_id: description: diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index 597e4c7394e..cc487af5e51 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -25,7 +25,7 @@ short_description: Manages account on Apache CloudStack based clouds. description: - Create, disable, lock, enable and remove accounts. version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: name: description: diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 40896942cb1..580cc5d7e8d 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -25,7 +25,7 @@ short_description: Manages affinity groups on Apache CloudStack based clouds. description: - Create and remove affinity groups. version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: name: description: diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 828aa1faf98..96b3f20f7cf 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -25,7 +25,7 @@ short_description: Manages firewall rules on Apache CloudStack based clouds. description: - Creates and removes firewall rules. version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: ip_address: description: diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 46fd66f510d..a93a524383a 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -25,7 +25,7 @@ short_description: Manages instances and virtual machines on Apache CloudStack b description: - Deploy, start, restart, stop and destroy instances. 
version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: name: description: diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index 396cafa388d..478748aeec3 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -25,7 +25,7 @@ short_description: Manages instance groups on Apache CloudStack based clouds. description: - Create and remove instance groups. version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: name: description: diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index d9ec6880627..e3ba322f6ba 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -25,7 +25,7 @@ short_description: Manages ISOs images on Apache CloudStack based clouds. description: - Register and remove ISO images. version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: name: description: diff --git a/cloud/cloudstack/cs_network.py b/cloud/cloudstack/cs_network.py index e22eaf0a5c3..b602b345677 100644 --- a/cloud/cloudstack/cs_network.py +++ b/cloud/cloudstack/cs_network.py @@ -25,7 +25,7 @@ short_description: Manages networks on Apache CloudStack based clouds. description: - Create, update, restart and delete networks. version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: name: description: diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 00b084d9195..3b88ca85723 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -25,7 +25,7 @@ short_description: Manages port forwarding rules on Apache CloudStack based clou description: - Create, update and remove port forwarding rules. version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: ip_address: description: diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py index b505433892e..0f391bc5005 100644 --- a/cloud/cloudstack/cs_project.py +++ b/cloud/cloudstack/cs_project.py @@ -25,7 +25,7 @@ short_description: Manages projects on Apache CloudStack based clouds. description: - Create, update, suspend, activate and remove projects. version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: name: description: diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 08fb72c821d..54a71686a6e 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -25,7 +25,7 @@ short_description: Manages security groups on Apache CloudStack based clouds. description: - Create and remove security groups. version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: name: description: diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 9252e06ce62..e943e7d11c2 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -25,7 +25,7 @@ short_description: Manages security group rules on Apache CloudStack based cloud description: - Add and remove security group rules. 
version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: security_group: description: diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 0a54a1971bc..180e96ca6ae 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -27,7 +27,7 @@ description: - If no key was found and no public key was provided and a new SSH private/public key pair will be created and the private key will be returned. version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: name: description: diff --git a/cloud/cloudstack/cs_template.py b/cloud/cloudstack/cs_template.py index 48f00fad553..1cd245d2b5c 100644 --- a/cloud/cloudstack/cs_template.py +++ b/cloud/cloudstack/cs_template.py @@ -25,7 +25,7 @@ short_description: Manages templates on Apache CloudStack based clouds. description: - Register a template from URL, create a template from a ROOT volume of a stopped VM or its snapshot and delete templates. version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: name: description: diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index fb7668640dc..24e8a46fa37 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -25,7 +25,7 @@ short_description: Manages VM snapshots on Apache CloudStack based clouds. description: - Create, remove and revert VM from snapshots. version_added: '2.0' -author: '"René Moser (@resmo)" ' +author: "René Moser (@resmo)" options: name: description: diff --git a/cloud/google/gce_img.py b/cloud/google/gce_img.py index 9cc37f8eb33..5775a94794d 100644 --- a/cloud/google/gce_img.py +++ b/cloud/google/gce_img.py @@ -81,7 +81,7 @@ options: requirements: - "python >= 2.6" - "apache-libcloud" -author: '"Peter Tan (@tanpeter)" ' +author: "Peter Tan (@tanpeter)" ''' EXAMPLES = ''' diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 18555e2e351..711c70bca98 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -26,7 +26,7 @@ short_description: Manage LXC Containers version_added: 1.8.0 description: - Management of LXC containers -author: '"Kevin Carter (@cloudnull)" ' +author: "Kevin Carter (@cloudnull)" options: name: description: diff --git a/cloud/misc/ovirt.py b/cloud/misc/ovirt.py index 718f25fec2c..6e8f3281dc5 100644 --- a/cloud/misc/ovirt.py +++ b/cloud/misc/ovirt.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: ovirt -author: '"Vincent Van der Kussen (@vincentvdk)" ' +author: "Vincent Van der Kussen (@vincentvdk)" short_description: oVirt/RHEV platform management description: - allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform diff --git a/cloud/misc/virt.py b/cloud/misc/virt.py index 343a3eedcf7..80b8e2558eb 100644 --- a/cloud/misc/virt.py +++ b/cloud/misc/virt.py @@ -60,8 +60,8 @@ requirements: - "libvirt-python" author: - "Ansible Core Team" - - '"Michael DeHaan (@mpdehaan)" ' - - '"Seth Vidal (@skvidal)" ' + - "Michael DeHaan" + - "Seth Vidal" ''' EXAMPLES = ''' diff --git a/cloud/vmware/vmware_datacenter.py b/cloud/vmware/vmware_datacenter.py index b1e995b965b..b2083222ed5 100644 --- a/cloud/vmware/vmware_datacenter.py +++ b/cloud/vmware/vmware_datacenter.py @@ -25,7 +25,7 @@ short_description: Manage VMware vSphere Datacenters description: - Manage VMware vSphere Datacenters version_added: 2.0 -author: 
'"Joseph Callen (@jcpowermac)" ' +author: "Joseph Callen (@jcpowermac)" notes: - Tested on vSphere 5.5 requirements: diff --git a/clustering/consul.py b/clustering/consul.py index 8423ffe418f..083173230f7 100644 --- a/clustering/consul.py +++ b/clustering/consul.py @@ -42,7 +42,7 @@ requirements: - python-consul - requests version_added: "2.0" -author: '"Steve Gargan (@sgargan)" ' +author: "Steve Gargan (@sgargan)" options: state: description: diff --git a/clustering/consul_acl.py b/clustering/consul_acl.py index b832281bb80..250de24e2a3 100644 --- a/clustering/consul_acl.py +++ b/clustering/consul_acl.py @@ -30,7 +30,7 @@ requirements: - pyhcl - requests version_added: "2.0" -author: '"Steve Gargan (@sgargan)" ' +author: "Steve Gargan (@sgargan)" options: mgmt_token: description: diff --git a/clustering/consul_kv.py b/clustering/consul_kv.py index 69a66c746ab..2ba3a0315a3 100644 --- a/clustering/consul_kv.py +++ b/clustering/consul_kv.py @@ -32,7 +32,7 @@ requirements: - python-consul - requests version_added: "2.0" -author: '"Steve Gargan (@sgargan)" ' +author: "Steve Gargan (@sgargan)" options: state: description: diff --git a/clustering/consul_session.py b/clustering/consul_session.py index d57c2b69db8..ef4646c35e4 100644 --- a/clustering/consul_session.py +++ b/clustering/consul_session.py @@ -30,7 +30,7 @@ requirements: - python-consul - requests version_added: "2.0" -author: '"Steve Gargan (@sgargan)" ' +author: "Steve Gargan (@sgargan)" options: state: description: diff --git a/commands/expect.py b/commands/expect.py index 124c718b73b..e8f7a049836 100644 --- a/commands/expect.py +++ b/commands/expect.py @@ -73,7 +73,7 @@ notes: - If you want to run a command through the shell (say you are using C(<), C(>), C(|), etc), you must specify a shell in the command such as C(/bin/bash -c "/path/to/something | grep else") -author: '"Matt Martz (@sivel)" ' +author: "Matt Martz (@sivel)" ''' EXAMPLES = ''' diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index 9802f890a35..ede8004945b 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -99,7 +99,7 @@ notes: - Requires the pymongo Python package on the remote host, version 2.4.2+. This can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html requirements: [ "pymongo" ] -author: '"Elliott Foster (@elliotttf)" ' +author: "Elliott Foster (@elliotttf)" ''' EXAMPLES = ''' diff --git a/database/misc/riak.py b/database/misc/riak.py index 4f10775a5ad..12586651887 100644 --- a/database/misc/riak.py +++ b/database/misc/riak.py @@ -27,8 +27,8 @@ description: the status of the cluster. version_added: "1.2" author: - - '"James Martin (@jsmartin)" ' - - '"Drew Kerrigan (@drewkerrigan)" ' + - "James Martin (@jsmartin)" + - "Drew Kerrigan (@drewkerrigan)" options: command: description: diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index 898b1510c1d..f5d2d5cf630 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -30,7 +30,7 @@ short_description: Manage MySQL replication description: - Manages MySQL server replication, slave, master status get and change master host. 
version_added: "1.3" -author: '"Balazs Pocze (@banyek)" ' +author: "Balazs Pocze (@banyek)" options: mode: description: diff --git a/files/patch.py b/files/patch.py index c1a61ce733f..60629c922e9 100644 --- a/files/patch.py +++ b/files/patch.py @@ -23,8 +23,8 @@ DOCUMENTATION = ''' --- module: patch author: - - '"Jakub Jirutka (@jirutka)" ' - - '"Luis Alberto Perez Lazaro (@luisperlaz)" ' + - "Jakub Jirutka (@jirutka)" + - "Luis Alberto Perez Lazaro (@luisperlaz)" version_added: 1.9 description: - Apply patch files using the GNU patch tool. diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py index b0ae3a38bf7..fc69f490fad 100644 --- a/messaging/rabbitmq_binding.py +++ b/messaging/rabbitmq_binding.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: rabbitmq_binding -author: '"Manuel Sousa (@manuel-sousa)" ' +author: "Manuel Sousa (@manuel-sousa)" version_added: "2.0" short_description: This module manages rabbitMQ bindings diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py index 6f3ce143c4a..fb74298879b 100644 --- a/messaging/rabbitmq_exchange.py +++ b/messaging/rabbitmq_exchange.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: rabbitmq_exchange -author: '"Manuel Sousa (@manuel-sousa)" ' +author: "Manuel Sousa (@manuel-sousa)" version_added: "2.0" short_description: This module manages rabbitMQ exchanges diff --git a/messaging/rabbitmq_policy.py b/messaging/rabbitmq_policy.py index a4d94decbd1..81d7068ec46 100644 --- a/messaging/rabbitmq_policy.py +++ b/messaging/rabbitmq_policy.py @@ -26,7 +26,7 @@ short_description: Manage the state of policies in RabbitMQ. description: - Manage the state of a virtual host in RabbitMQ. version_added: "1.5" -author: '"John Dewey (@retr0h)" ' +author: "John Dewey (@retr0h)" options: name: description: diff --git a/messaging/rabbitmq_queue.py b/messaging/rabbitmq_queue.py index 105104b3d77..5a403a6b602 100644 --- a/messaging/rabbitmq_queue.py +++ b/messaging/rabbitmq_queue.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: rabbitmq_queue -author: '"Manuel Sousa (@manuel-sousa)" ' +author: "Manuel Sousa (@manuel-sousa)" version_added: "2.0" short_description: This module manages rabbitMQ queues diff --git a/monitoring/airbrake_deployment.py b/monitoring/airbrake_deployment.py index 0036bde7daa..3b54e55e751 100644 --- a/monitoring/airbrake_deployment.py +++ b/monitoring/airbrake_deployment.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: airbrake_deployment version_added: "1.2" -author: '"Bruce Pennypacker (@bpennypacker)" ' +author: "Bruce Pennypacker (@bpennypacker)" short_description: Notify airbrake about app deployments description: - Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking) diff --git a/monitoring/boundary_meter.py b/monitoring/boundary_meter.py index adc2b2433e1..431a6ace1b9 100644 --- a/monitoring/boundary_meter.py +++ b/monitoring/boundary_meter.py @@ -34,7 +34,7 @@ short_description: Manage boundary meters description: - This module manages boundary meters version_added: "1.3" -author: '"curtis (@ccollicutt)" ' +author: "curtis (@ccollicutt)" requirements: - Boundary API access - bprobe is required to send data, but not to register a meter diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index d363f8b17dc..ebbad039dec 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -14,7 +14,7 @@ description: - "Allows to post events to DataDog (www.datadoghq.com) service." 
- "Uses http://docs.datadoghq.com/api/#events API." version_added: "1.3" -author: '"Artūras `arturaz` Šlajus (@arturaz)" ' +author: "Artūras `arturaz` Šlajus (@arturaz)" notes: [] requirements: [urllib2] options: diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py index f1acb169ce0..9853d748c2c 100644 --- a/monitoring/datadog_monitor.py +++ b/monitoring/datadog_monitor.py @@ -34,7 +34,7 @@ description: - "Manages monitors within Datadog" - "Options like described on http://docs.datadoghq.com/api/" version_added: "2.0" -author: '"Sebastian Kornehl (@skornehl)" ' +author: "Sebastian Kornehl (@skornehl)" notes: [] requirements: [datadog] options: diff --git a/monitoring/logentries.py b/monitoring/logentries.py index 75ed2e0e6dd..a347afd84c2 100644 --- a/monitoring/logentries.py +++ b/monitoring/logentries.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: logentries -author: '"Ivan Vanderbyl (@ivanvanderbyl)" ' +author: "Ivan Vanderbyl (@ivanvanderbyl)" short_description: Module for tracking logs via logentries.com description: - Sends logs to LogEntries in realtime diff --git a/monitoring/monit.py b/monitoring/monit.py index 6410ce815e8..3d3c7c8c3ca 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -39,7 +39,7 @@ options: default: null choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ] requirements: [ ] -author: '"Darryl Stoflet (@dstoflet)" ' +author: "Darryl Stoflet (@dstoflet)" ''' EXAMPLES = ''' diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 0026751ea58..16edca2aa6a 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -86,7 +86,7 @@ options: required: true default: null -author: '"Tim Bielawa (@tbielawa)" ' +author: "Tim Bielawa (@tbielawa)" requirements: [ "Nagios" ] ''' diff --git a/monitoring/newrelic_deployment.py b/monitoring/newrelic_deployment.py index 166cdcda0be..832e467dea0 100644 --- a/monitoring/newrelic_deployment.py +++ b/monitoring/newrelic_deployment.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: newrelic_deployment version_added: "1.2" -author: '"Matt Coddington (@mcodd)" ' +author: "Matt Coddington (@mcodd)" short_description: Notify newrelic about app deployments description: - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api) diff --git a/monitoring/rollbar_deployment.py b/monitoring/rollbar_deployment.py index dc064d6194d..43e2aa00722 100644 --- a/monitoring/rollbar_deployment.py +++ b/monitoring/rollbar_deployment.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: rollbar_deployment version_added: 1.6 -author: '"Max Riveiro (@kavu)" ' +author: "Max Riveiro (@kavu)" short_description: Notify Rollbar about app deployments description: - Notify Rollbar about app deployments diff --git a/monitoring/zabbix_maintenance.py b/monitoring/zabbix_maintenance.py index 25d7c8df95e..2d611382919 100644 --- a/monitoring/zabbix_maintenance.py +++ b/monitoring/zabbix_maintenance.py @@ -26,7 +26,7 @@ short_description: Create Zabbix maintenance windows description: - This module will let you create Zabbix maintenance windows. 
version_added: "1.8" -author: '"Alexander Bulimov (@abulimov)" ' +author: "Alexander Bulimov (@abulimov)" requirements: - "python >= 2.6" - zabbix-api diff --git a/network/a10/a10_server.py b/network/a10/a10_server.py index 72ed0f648e6..2ad66c23588 100644 --- a/network/a10/a10_server.py +++ b/network/a10/a10_server.py @@ -28,7 +28,7 @@ version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices description: - Manage slb server objects on A10 Networks devices via aXAPI -author: '"Mischa Peters (@mischapeters)" ' +author: "Mischa Peters (@mischapeters)" notes: - Requires A10 Networks aXAPI 2.1 options: diff --git a/network/a10/a10_service_group.py b/network/a10/a10_service_group.py index 8e84bf9a07d..db1c21bc78e 100644 --- a/network/a10/a10_service_group.py +++ b/network/a10/a10_service_group.py @@ -28,7 +28,7 @@ version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices description: - Manage slb service-group objects on A10 Networks devices via aXAPI -author: '"Mischa Peters (@mischapeters)" ' +author: "Mischa Peters (@mischapeters)" notes: - Requires A10 Networks aXAPI 2.1 - When a server doesn't exist and is added to the service-group the server will be created diff --git a/network/a10/a10_virtual_server.py b/network/a10/a10_virtual_server.py index 3df93f67dbe..eb308a3032a 100644 --- a/network/a10/a10_virtual_server.py +++ b/network/a10/a10_virtual_server.py @@ -28,7 +28,7 @@ version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices description: - Manage slb virtual server objects on A10 Networks devices via aXAPI -author: '"Mischa Peters (@mischapeters)" ' +author: "Mischa Peters (@mischapeters)" notes: - Requires A10 Networks aXAPI 2.1 requirements: diff --git a/network/citrix/netscaler.py b/network/citrix/netscaler.py index 8f78e23caac..61bc35356e5 100644 --- a/network/citrix/netscaler.py +++ b/network/citrix/netscaler.py @@ -82,7 +82,7 @@ options: choices: ['yes', 'no'] requirements: [ "urllib", "urllib2" ] -author: '"Nandor Sivok (@dominis)" ' +author: "Nandor Sivok (@dominis)" ''' EXAMPLES = ''' diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 7b78c6d97f7..1b106ba0a3e 100644 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -25,7 +25,7 @@ short_description: "Collect facts from F5 BIG-IP devices" description: - "Collect facts from F5 BIG-IP devices via iControl SOAP API" version_added: "1.6" -author: '"Matt Hite (@mhite)" ' +author: "Matt Hite (@mhite)" notes: - "Requires BIG-IP software version >= 11.4" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py index 5299bdb0f44..ea24e995e27 100644 --- a/network/f5/bigip_monitor_http.py +++ b/network/f5/bigip_monitor_http.py @@ -27,7 +27,7 @@ short_description: "Manages F5 BIG-IP LTM http monitors" description: - "Manages F5 BIG-IP LTM monitors via iControl SOAP API" version_added: "1.4" -author: '"Serge van Ginderachter (@srvg)" ' +author: "Serge van Ginderachter (@srvg)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py index b5f58da8397..0900e95fd20 100644 --- a/network/f5/bigip_monitor_tcp.py +++ b/network/f5/bigip_monitor_tcp.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM tcp monitors" description: - "Manages F5 BIG-IP LTM tcp 
monitors via iControl SOAP API" version_added: "1.4" -author: '"Serge van Ginderachter (@srvg)" ' +author: "Serge van Ginderachter (@srvg)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index 49f721aa8c5..28eacc0d6f5 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM nodes" description: - "Manages F5 BIG-IP LTM nodes via iControl SOAP API" version_added: "1.4" -author: '"Matt Hite (@mhite)" ' +author: "Matt Hite (@mhite)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py index 4d8d599134e..1628f6c68c9 100644 --- a/network/f5/bigip_pool.py +++ b/network/f5/bigip_pool.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM pools" description: - "Manages F5 BIG-IP LTM pools via iControl SOAP API" version_added: "1.2" -author: '"Matt Hite (@mhite)" ' +author: "Matt Hite (@mhite)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index 1d59462023f..ec2b7135372 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM pool members" description: - "Manages F5 BIG-IP LTM pool members via iControl SOAP API" version_added: "1.4" -author: '"Matt Hite (@mhite)" ' +author: "Matt Hite (@mhite)" notes: - "Requires BIG-IP software version >= 11" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" diff --git a/network/haproxy.py b/network/haproxy.py index c897349019e..00fc4ff63a1 100644 --- a/network/haproxy.py +++ b/network/haproxy.py @@ -91,7 +91,7 @@ examples: # enable server in 'www' backend pool with change server(s) weight - haproxy: state=enabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock weight=10 backend=www -author: "Ravi Bhure (@ravibhure)" +author: "Ravi Bhure (@ravibhure)" ''' import socket diff --git a/network/openvswitch_bridge.py b/network/openvswitch_bridge.py index 28df3e84426..b9ddff562c6 100644 --- a/network/openvswitch_bridge.py +++ b/network/openvswitch_bridge.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: openvswitch_bridge version_added: 1.4 -author: '"David Stygstra (@stygstra)" ' +author: "David Stygstra (@stygstra)" short_description: Manage Open vSwitch bridges requirements: [ ovs-vsctl ] description: diff --git a/network/openvswitch_port.py b/network/openvswitch_port.py index ab87ea42b4a..6f59f4b134b 100644 --- a/network/openvswitch_port.py +++ b/network/openvswitch_port.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: openvswitch_port version_added: 1.4 -author: '"David Stygstra (@stygstra)" ' +author: "David Stygstra (@stygstra)" short_description: Manage Open vSwitch ports requirements: [ ovs-vsctl ] description: diff --git a/notification/campfire.py b/notification/campfire.py index 9218826a7b4..2400ad3ba40 100644 --- a/notification/campfire.py +++ b/notification/campfire.py @@ -43,7 +43,7 @@ options: # informational: requirements for nodes requirements: [ urllib2, cgi ] -author: '"Adam Garside (@fabulops)" ' +author: "Adam Garside (@fabulops)" ''' EXAMPLES = ''' diff --git a/notification/flowdock.py b/notification/flowdock.py index 
aea107457fb..7c42e58644d 100644 --- a/notification/flowdock.py +++ b/notification/flowdock.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: flowdock version_added: "1.2" -author: '"Matt Coddington (@mcodd)" ' +author: "Matt Coddington (@mcodd)" short_description: Send a message to a flowdock description: - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat) diff --git a/notification/grove.py b/notification/grove.py index 5c27b18c30f..85601d1cc78 100644 --- a/notification/grove.py +++ b/notification/grove.py @@ -39,7 +39,7 @@ options: default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 -author: '"Jonas Pfenniger (@zimbatm)" ' +author: "Jonas Pfenniger (@zimbatm)" ''' EXAMPLES = ''' diff --git a/notification/mail.py b/notification/mail.py index 4feaebf5d36..c42e80fdabf 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -20,7 +20,7 @@ DOCUMENTATION = """ --- -author: '"Dag Wieers (@dagwieers)" ' +author: "Dag Wieers (@dagwieers)" module: mail short_description: Send an email description: diff --git a/notification/nexmo.py b/notification/nexmo.py index a1dd9c2b64d..d0c3d05e65c 100644 --- a/notification/nexmo.py +++ b/notification/nexmo.py @@ -24,7 +24,7 @@ short_description: Send a SMS via nexmo description: - Send a SMS message via nexmo version_added: 1.6 -author: '"Matt Martz (@sivel)" ' +author: "Matt Martz (@sivel)" options: api_key: description: diff --git a/notification/pushover.py b/notification/pushover.py index 951c65f43fe..505917189e4 100644 --- a/notification/pushover.py +++ b/notification/pushover.py @@ -48,7 +48,7 @@ options: description: Message priority (see u(https://pushover.net) for details.) required: false -author: '"Jim Richardson (@weaselkeeper)" ' +author: "Jim Richardson (@weaselkeeper)" ''' EXAMPLES = ''' diff --git a/notification/sendgrid.py b/notification/sendgrid.py index 6278f613ee4..78806687e0b 100644 --- a/notification/sendgrid.py +++ b/notification/sendgrid.py @@ -53,7 +53,7 @@ options: the desired subject for the email required: true -author: '"Matt Makai (@makaimc)" ' +author: "Matt Makai (@makaimc)" ''' EXAMPLES = ''' diff --git a/notification/slack.py b/notification/slack.py index 7e5215479ab..baabe4f58d2 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -24,7 +24,7 @@ short_description: Send Slack notifications description: - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration version_added: 1.6 -author: '"Ramon de la Fuente (@ramondelafuente)" ' +author: "Ramon de la Fuente (@ramondelafuente)" options: domain: description: diff --git a/notification/sns.py b/notification/sns.py index 910105f0ebb..70030d66196 100644 --- a/notification/sns.py +++ b/notification/sns.py @@ -24,7 +24,7 @@ short_description: Send Amazon Simple Notification Service (SNS) messages description: - The M(sns) module sends notifications to a topic on your Amazon SNS account version_added: 1.6 -author: '"Michael J. Schultz (@mjschultz)" ' +author: "Michael J. 
Schultz (@mjschultz)" options: msg: description: diff --git a/notification/twilio.py b/notification/twilio.py index 568d0c60a58..e9ec5bcf51e 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -58,7 +58,7 @@ options: (multimedia message) instead of a plain SMS required: false -author: '"Matt Makai (@makaimc)" ' +author: "Matt Makai (@makaimc)" ''' EXAMPLES = ''' diff --git a/notification/typetalk.py b/notification/typetalk.py index 8e79a7617ed..638f97ae530 100644 --- a/notification/typetalk.py +++ b/notification/typetalk.py @@ -26,7 +26,7 @@ options: - message body required: true requirements: [ urllib, urllib2, json ] -author: '"Takashi Someda (@tksmd)" ' +author: "Takashi Someda (@tksmd)" ''' EXAMPLES = ''' diff --git a/packaging/language/bower.py b/packaging/language/bower.py index 8fbe20f7e0c..7af8136a445 100644 --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -25,7 +25,7 @@ short_description: Manage bower packages with bower description: - Manage bower packages with bower version_added: 1.9 -author: '"Michael Warkentin (@mwarkentin)" ' +author: "Michael Warkentin (@mwarkentin)" options: name: description: diff --git a/packaging/language/composer.py b/packaging/language/composer.py index cfe3f99b9e7..8e11d25216b 100644 --- a/packaging/language/composer.py +++ b/packaging/language/composer.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: composer -author: '"Dimitrios Tydeas Mengidis (@dmtrs)" ' +author: "Dimitrios Tydeas Mengidis (@dmtrs)" short_description: Dependency Manager for PHP version_added: "1.6" description: diff --git a/packaging/language/cpanm.py b/packaging/language/cpanm.py index 5549dab8895..02b306b669c 100644 --- a/packaging/language/cpanm.py +++ b/packaging/language/cpanm.py @@ -73,7 +73,7 @@ examples: description: Install I(Dancer) perl package from a specific mirror notes: - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. -author: '"Franck Cuny (@franckcuny)" ' +author: "Franck Cuny (@franckcuny)" ''' def _is_package_installed(module, name, locallib, cpanm): diff --git a/packaging/language/maven_artifact.py b/packaging/language/maven_artifact.py index 057cb0a3814..3e196dd93a5 100644 --- a/packaging/language/maven_artifact.py +++ b/packaging/language/maven_artifact.py @@ -37,7 +37,7 @@ description: - Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve - snapshots or release versions of the artifact and will resolve the latest available version if one is not - available. 
-author: '"Chris Schmidt (@chrisisbeef)" ' +author: "Chris Schmidt (@chrisisbeef)" requirements: - "python >= 2.6" - lxml diff --git a/packaging/language/npm.py b/packaging/language/npm.py index 3eafcd6c2a7..d804efff331 100644 --- a/packaging/language/npm.py +++ b/packaging/language/npm.py @@ -25,7 +25,7 @@ short_description: Manage node.js packages with npm description: - Manage node.js packages with Node Package Manager (npm) version_added: 1.2 -author: '"Chris Hoffman (@chrishoffman)" ' +author: "Chris Hoffman (@chrishoffman)" options: name: description: diff --git a/packaging/os/dnf.py b/packaging/os/dnf.py index e40c268f742..7afbee44c54 100644 --- a/packaging/os/dnf.py +++ b/packaging/os/dnf.py @@ -95,7 +95,7 @@ notes: [] requirements: - dnf - yum-utils (for repoquery) -author: '"Cristian van Ee (@DJMuggs)" ' +author: "Cristian van Ee (@DJMuggs)" ''' EXAMPLES = ''' diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index 0b37521820d..91888ba6bca 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -23,8 +23,8 @@ DOCUMENTATION = ''' --- module: homebrew author: - - '"Daniel Jaouen (@danieljaouen)" ' - - '"Andrew Dunham (@andrew-d)" ' + - "Daniel Jaouen (@danieljaouen)" + - "Andrew Dunham (@andrew-d)" short_description: Package manager for Homebrew description: - Manages Homebrew packages diff --git a/packaging/os/homebrew_cask.py b/packaging/os/homebrew_cask.py index bb5cacabbc7..e1b721a97b4 100644 --- a/packaging/os/homebrew_cask.py +++ b/packaging/os/homebrew_cask.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: homebrew_cask -author: '"Daniel Jaouen (@danieljaouen)" ' +author: "Daniel Jaouen (@danieljaouen)" short_description: Install/uninstall homebrew casks. description: - Manages Homebrew casks. diff --git a/packaging/os/homebrew_tap.py b/packaging/os/homebrew_tap.py index 504e77eb062..c6511f0c7b2 100644 --- a/packaging/os/homebrew_tap.py +++ b/packaging/os/homebrew_tap.py @@ -24,7 +24,7 @@ import re DOCUMENTATION = ''' --- module: homebrew_tap -author: '"Daniel Jaouen (@danieljaouen)" ' +author: "Daniel Jaouen (@danieljaouen)" short_description: Tap a Homebrew repository. description: - Tap external Homebrew repositories. diff --git a/packaging/os/layman.py b/packaging/os/layman.py index 3cad5e35642..c9d6b8ed333 100644 --- a/packaging/os/layman.py +++ b/packaging/os/layman.py @@ -25,7 +25,7 @@ from urllib2 import Request, urlopen, URLError DOCUMENTATION = ''' --- module: layman -author: '"Jakub Jirutka (@jirutka)" ' +author: "Jakub Jirutka (@jirutka)" version_added: "1.6" short_description: Manage Gentoo overlays description: diff --git a/packaging/os/openbsd_pkg.py b/packaging/os/openbsd_pkg.py index 2f81753fb64..1b5d0bb06b2 100644 --- a/packaging/os/openbsd_pkg.py +++ b/packaging/os/openbsd_pkg.py @@ -25,7 +25,7 @@ import syslog DOCUMENTATION = ''' --- module: openbsd_pkg -author: '"Patrik Lundin (@eest)" ' +author: "Patrik Lundin (@eest)" version_added: "1.1" short_description: Manage packages on OpenBSD. 
description: diff --git a/packaging/os/opkg.py b/packaging/os/opkg.py index 8f06a03a1b2..5b75ad1a260 100644 --- a/packaging/os/opkg.py +++ b/packaging/os/opkg.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: opkg -author: '"Patrick Pelletier (@skinp)" ' +author: "Patrick Pelletier (@skinp)" short_description: Package manager for OpenWrt description: - Manages OpenWrt packages diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index 632a36796dc..837eefd243e 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: pkg5 -author: '"Peter Oliver (@mavit)" ' +author: "Peter Oliver (@mavit)" short_description: Manages packages with the Solaris 11 Image Packaging System version_added: 1.9 description: diff --git a/packaging/os/pkg5_publisher.py b/packaging/os/pkg5_publisher.py index 1db07d512b7..3881f5dd0b8 100644 --- a/packaging/os/pkg5_publisher.py +++ b/packaging/os/pkg5_publisher.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' --- module: pkg5_publisher -author: '"Peter Oliver (@mavit)" ' +author: "Peter Oliver (@mavit)" short_description: Manages Solaris 11 Image Packaging System publishers version_added: 1.9 description: diff --git a/packaging/os/pkgin.py b/packaging/os/pkgin.py index 33bcb5482f0..e600026409b 100644 --- a/packaging/os/pkgin.py +++ b/packaging/os/pkgin.py @@ -31,8 +31,8 @@ description: or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))" version_added: "1.0" author: - - '"Larry Gilbert (L2G)" ' - - '"Shaun Zinck (@szinck)" ' + - "Larry Gilbert (L2G)" + - "Shaun Zinck (@szinck)" notes: - "Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package will be silently removed as diff --git a/packaging/os/pkgng.py b/packaging/os/pkgng.py index 132cff637e6..c0819dbe5b8 100644 --- a/packaging/os/pkgng.py +++ b/packaging/os/pkgng.py @@ -63,7 +63,7 @@ options: for newer pkgng versions, specify a the name of a repository configured in /usr/local/etc/pkg/repos required: false -author: '"bleader (@bleader)" ' +author: "bleader (@bleader)" notes: - When using pkgsite, be careful that already in cache packages won't be downloaded again. ''' diff --git a/packaging/os/pkgutil.py b/packaging/os/pkgutil.py index 62107aa0475..3a4720630cf 100644 --- a/packaging/os/pkgutil.py +++ b/packaging/os/pkgutil.py @@ -32,7 +32,7 @@ description: - Pkgutil is an advanced packaging system, which resolves dependency on installation. It is designed for CSW packages. 
version_added: "1.3" -author: '"Alexander Winkler (@dermute)" ' +author: "Alexander Winkler (@dermute)" options: name: description: diff --git a/packaging/os/portinstall.py b/packaging/os/portinstall.py index 1673c4dde37..b4e3044167e 100644 --- a/packaging/os/portinstall.py +++ b/packaging/os/portinstall.py @@ -43,7 +43,7 @@ options: choices: [ 'yes', 'no' ] required: false default: yes -author: '"berenddeboer (@berenddeboer)" ' +author: "berenddeboer (@berenddeboer)" ''' EXAMPLES = ''' diff --git a/packaging/os/swdepot.py b/packaging/os/swdepot.py index 56b33d401bf..157fa212c17 100644 --- a/packaging/os/swdepot.py +++ b/packaging/os/swdepot.py @@ -29,7 +29,7 @@ description: - Will install, upgrade and remove packages with swdepot package manager (HP-UX) version_added: "1.4" notes: [] -author: '"Raul Melo (@melodous)" ' +author: "Raul Melo (@melodous)" options: name: description: diff --git a/packaging/os/urpmi.py b/packaging/os/urpmi.py index c202ee27ace..7b7aaefbd1d 100644 --- a/packaging/os/urpmi.py +++ b/packaging/os/urpmi.py @@ -57,7 +57,7 @@ options: required: false default: yes choices: [ "yes", "no" ] -author: '"Philippe Makowski (@pmakowski)" ' +author: "Philippe Makowski (@pmakowski)" notes: [] ''' diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index c175c152050..f3205051fdf 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -31,7 +31,7 @@ import re DOCUMENTATION = ''' --- module: zypper -author: '"Patrick Callahan (@dirtyharrycallahan)" ' +author: "Patrick Callahan (@dirtyharrycallahan)" version_added: "1.2" short_description: Manage packages on SUSE and openSUSE description: diff --git a/packaging/os/zypper_repository.py b/packaging/os/zypper_repository.py index 3210e93d391..54e20429638 100644 --- a/packaging/os/zypper_repository.py +++ b/packaging/os/zypper_repository.py @@ -23,7 +23,7 @@ DOCUMENTATION = ''' --- module: zypper_repository -author: '"Matthias Vogelgesang (@matze)" ' +author: "Matthias Vogelgesang (@matze)" version_added: "1.4" short_description: Add and remove Zypper repositories description: diff --git a/source_control/bzr.py b/source_control/bzr.py index 5519a8af123..0fc6ac28584 100644 --- a/source_control/bzr.py +++ b/source_control/bzr.py @@ -22,7 +22,7 @@ DOCUMENTATION = u''' --- module: bzr -author: '"André Paramés (@andreparames)" ' +author: "André Paramés (@andreparames)" version_added: "1.1" short_description: Deploy software (or files) from bzr branches description: diff --git a/source_control/github_hooks.py b/source_control/github_hooks.py index bb60b634cb3..d75fcb1573d 100644 --- a/source_control/github_hooks.py +++ b/source_control/github_hooks.py @@ -64,7 +64,7 @@ options: default: 'json' choices: ['json', 'form'] -author: '"Phillip Gentry, CX Inc (@pcgentry)" ' +author: "Phillip Gentry, CX Inc (@pcgentry)" ''' EXAMPLES = ''' diff --git a/system/alternatives.py b/system/alternatives.py index 06d9bea25f0..90e2237f86c 100644 --- a/system/alternatives.py +++ b/system/alternatives.py @@ -31,8 +31,8 @@ description: - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). 
version_added: "1.6" author: - - '"David Wittman (@DavidWittman)" ' - - '"Gabe Mulley (@mulby)" ' + - "David Wittman (@DavidWittman)" + - "Gabe Mulley (@mulby)" options: name: description: diff --git a/system/at.py b/system/at.py index 03ac14a44aa..0ce9ff2c7d4 100644 --- a/system/at.py +++ b/system/at.py @@ -59,7 +59,7 @@ options: default: false requirements: - at -author: '"Richard Isaacson (@risaacson)" ' +author: "Richard Isaacson (@risaacson)" ''' EXAMPLES = ''' diff --git a/system/capabilities.py b/system/capabilities.py index 0c7f2e22d0b..ce8ffcfa632 100644 --- a/system/capabilities.py +++ b/system/capabilities.py @@ -50,7 +50,7 @@ notes: and flags to compare, so you will want to ensure that your capabilities argument matches the final capabilities. requirements: [] -author: '"Nate Coraor (@natefoo)" ' +author: "Nate Coraor (@natefoo)" ''' EXAMPLES = ''' diff --git a/system/crypttab.py b/system/crypttab.py index 5b0edc62363..44d9f859791 100644 --- a/system/crypttab.py +++ b/system/crypttab.py @@ -69,7 +69,7 @@ options: notes: [] requirements: [] -author: '"Steve (@groks)" ' +author: "Steve (@groks)" ''' EXAMPLES = ''' diff --git a/system/filesystem.py b/system/filesystem.py index a2f979ecd0b..1e867f30270 100644 --- a/system/filesystem.py +++ b/system/filesystem.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -author: '"Alexander Bulimov (@abulimov)" ' +author: "Alexander Bulimov (@abulimov)" module: filesystem short_description: Makes file system on block device description: diff --git a/system/firewalld.py b/system/firewalld.py index e16e4e4a9dd..37ed1801f68 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -69,7 +69,7 @@ options: notes: - Not tested on any Debian based system. requirements: [ 'firewalld >= 0.2.11' ] -author: '"Adam Miller (@maxamillion)" ' +author: "Adam Miller (@maxamillion)" ''' EXAMPLES = ''' diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 32359cd2a82..7719006502d 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -103,7 +103,7 @@ options: notes: - "Requires cli tools for GlusterFS on servers" - "Will add new bricks, but not remove them" -author: '"Taneli Leppä (@rosmo)" ' +author: "Taneli Leppä (@rosmo)" """ EXAMPLES = """ diff --git a/system/kernel_blacklist.py b/system/kernel_blacklist.py index b0901473867..296a082a2ea 100644 --- a/system/kernel_blacklist.py +++ b/system/kernel_blacklist.py @@ -25,7 +25,7 @@ import re DOCUMENTATION = ''' --- module: kernel_blacklist -author: '"Matthias Vogelgesang (@matze)" ' +author: "Matthias Vogelgesang (@matze)" version_added: 1.4 short_description: Blacklist kernel modules description: diff --git a/system/known_hosts.py b/system/known_hosts.py index 74c6b0e90c7..303d9410d1e 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -51,7 +51,7 @@ options: required: no default: present requirements: [ ] -author: '"Matthew Vernon (@mcv21)" ' +author: "Matthew Vernon (@mcv21)" ''' EXAMPLES = ''' diff --git a/system/lvg.py b/system/lvg.py index 3c6c5ef2930..9e3ba2d2931 100644 --- a/system/lvg.py +++ b/system/lvg.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- -author: '"Alexander Bulimov (@abulimov)" ' +author: "Alexander Bulimov (@abulimov)" module: lvg short_description: Configure LVM volume groups description: diff --git a/system/lvol.py b/system/lvol.py index 3225408d162..7a01d83829c 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -21,8 +21,8 @@ DOCUMENTATION = ''' --- author: - - '"Jeroen Hoekx (@jhoekx)" ' - - '"Alexander Bulimov (@abulimov)" ' + - "Jeroen 
Hoekx (@jhoekx)" + - "Alexander Bulimov (@abulimov)" module: lvol short_description: Configure LVM logical volumes description: diff --git a/system/modprobe.py b/system/modprobe.py index bf58e435552..64e36c784a7 100644 --- a/system/modprobe.py +++ b/system/modprobe.py @@ -26,9 +26,9 @@ short_description: Add or remove kernel modules requirements: [] version_added: 1.4 author: - - '"David Stygstra (@stygstra)" ' - - Julien Dauphant - - Matt Jeffery + - "David Stygstra (@stygstra)" + - "Julien Dauphant" + - "Matt Jeffery" description: - Add or remove kernel modules. options: diff --git a/system/open_iscsi.py b/system/open_iscsi.py index 97652311f8d..e2477538888 100644 --- a/system/open_iscsi.py +++ b/system/open_iscsi.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: open_iscsi -author: '"Serge van Ginderachter (@srvg)" ' +author: "Serge van Ginderachter (@srvg)" version_added: "1.4" short_description: Manage iscsi targets with open-iscsi description: diff --git a/system/ufw.py b/system/ufw.py index 91d574f945d..cd148edf2ef 100644 --- a/system/ufw.py +++ b/system/ufw.py @@ -29,9 +29,9 @@ description: - Manage firewall with UFW. version_added: 1.6 author: - - '"Aleksey Ovcharenko (@ovcharenko)" ' - - '"Jarno Keskikangas (@pyykkis)" ' - - '"Ahti Kitsik (@ahtik)" ' + - "Aleksey Ovcharenko (@ovcharenko)" + - "Jarno Keskikangas (@pyykkis)" + - "Ahti Kitsik (@ahtik)" notes: - See C(man ufw) for more examples. requirements: diff --git a/system/zfs.py b/system/zfs.py index 97a0d6f3dba..c3c87634377 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -206,7 +206,7 @@ options: - The zoned property. required: False choices: ['on','off'] -author: '"Johan Wiren (@johanwiren)" ' +author: "Johan Wiren (@johanwiren)" ''' EXAMPLES = ''' diff --git a/web_infrastructure/ejabberd_user.py b/web_infrastructure/ejabberd_user.py index 79fe94fcddc..bf86806ad52 100644 --- a/web_infrastructure/ejabberd_user.py +++ b/web_infrastructure/ejabberd_user.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- module: ejabberd_user version_added: "1.5" -author: '"Peter Sprygada (@privateip)" ' +author: "Peter Sprygada (@privateip)" short_description: Manages users for ejabberd servers requirements: - ejabberd with mod_admin_extra diff --git a/web_infrastructure/jboss.py b/web_infrastructure/jboss.py index a0949c47531..9ec67b7c7b1 100644 --- a/web_infrastructure/jboss.py +++ b/web_infrastructure/jboss.py @@ -47,7 +47,7 @@ options: notes: - "The JBoss standalone deployment-scanner has to be enabled in standalone.xml" - "Ensure no identically named application is deployed through the JBoss CLI" -author: '"Jeroen Hoekx (@jhoekx)" ' +author: "Jeroen Hoekx (@jhoekx)" """ EXAMPLES = """ diff --git a/web_infrastructure/jira.py b/web_infrastructure/jira.py index 3dc963cb6bd..79cfb72d4a7 100644 --- a/web_infrastructure/jira.py +++ b/web_infrastructure/jira.py @@ -99,7 +99,7 @@ options: notes: - "Currently this only works with basic-auth." 
-author: '"Steve Smith (@tarka)" ' +author: "Steve Smith (@tarka)" """ EXAMPLES = """ diff --git a/windows/win_updates.py b/windows/win_updates.py index 7c93109efb9..4a9f055d8dc 100644 --- a/windows/win_updates.py +++ b/windows/win_updates.py @@ -41,7 +41,7 @@ options: - (anything that is a valid update category) default: critical aliases: [] -author: '"Peter Mounce (@petemounce)" ' +author: "Peter Mounce (@petemounce)" ''' EXAMPLES = ''' From caed7573d50fd51a658f23f54c61e42868c9bca2 Mon Sep 17 00:00:00 2001 From: Brian Brazil Date: Tue, 30 Sep 2014 10:59:01 +0100 Subject: [PATCH 672/720] Add dpkg_selections module, that works with dpkg --get-selections and --set-selections. This is mainly useful for setting packages to 'hold' to prevent them from being automatically upgraded. --- packaging/dpkg_selections | 60 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 packaging/dpkg_selections diff --git a/packaging/dpkg_selections b/packaging/dpkg_selections new file mode 100644 index 00000000000..f09ff9a9f00 --- /dev/null +++ b/packaging/dpkg_selections @@ -0,0 +1,60 @@ +#!/usr/bin/python + +DOCUMENTATION = ''' +--- +module: dpkg_selections +short_description: Dpkg package selection selections +description: + - Change dpkg package selection state via --get-selections and --set-selections. +version_added: "2.0" +author: Brian Brazil +options: + name: + description: + - Name of the package + required: true + selection: + description: + - The selection state to set the package to. + choices: [ 'install', 'hold', 'deinstall', 'purge' ] + required: true +notes: + - This module won't cause any packages to be installed/removed/purged, use the C(apt) module for that. +''' +EXAMPLES = ''' +# Prevent python from being upgraded. +- dpkg_selections: name=python selection=hold +''' + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + selection = dict(choices=['install', 'hold', 'deinstall', 'purge']) + ), + supports_check_mode=True, + ) + + dpkg = module.get_bin_path('dpkg', True) + + name = module.params['name'] + selection = module.params['selection'] + + # Get current settings. 
+ rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True) + if not out: + current = 'not present' + else: + current = out.split()[1] + + changed = current != selection + + if module.check_mode or not changed: + module.exit_json(changed=changed, before=current, after=selection) + + module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True) + module.exit_json(changed=changed, before=current, after=selection) + + +from ansible.module_utils.basic import * +main() From 330e66327ae91c378857d992d6edafc2fc883b8b Mon Sep 17 00:00:00 2001 From: Dag Wieers Date: Wed, 17 Jun 2015 14:53:17 +0200 Subject: [PATCH 673/720] New module to copy (push) files to a vCenter datastore --- cloud/vmware/vsphere_copy | 152 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 152 insertions(+) create mode 100644 cloud/vmware/vsphere_copy diff --git a/cloud/vmware/vsphere_copy b/cloud/vmware/vsphere_copy new file mode 100644 index 00000000000..f5f12f83555 --- /dev/null +++ b/cloud/vmware/vsphere_copy @@ -0,0 +1,152 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2015 Dag Wieers +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: vsphere_copy +short_description: Copy a file to a vCenter datastore +description: Upload files to a vCenter datastore +version_added: 2.0 +author: Dag Wieers +options: + host: + description: + - The vCenter server on which the datastore is available. + required: true + login: + description: + - The login name to authenticate on the vCenter server. + required: true + password: + description: + - The password to authenticate on the vCenter server. + required: true + src: + description: + - The file to push to vCenter + required: true + datacenter: + description: + - The datacenter on the vCenter server that holds the datastore. + required: true + datastore: + description: + - The datastore on the vCenter server to push files to. + required: true + path: + description: + - The file to push to the datastore on the vCenter server. + required: true +notes: + - This module ought to be run from a system that can access vCenter directly. + Either by using C(transport: local), or using C(delegate_to). 
+ - Tested on vSphere 5.5 +''' + +EXAMPLES = ''' +- vsphere_copy: host=vhost login=vuser password=vpass src=/some/local/file datacenter='DC1 Someplace' datastore=datastore1 path=some/remote/file + transport: local +- vsphere_copy: host=vhost login=vuser password=vpass src=/other/local/file datacenter='DC2 Someplace' datastore=datastore2 path=other/remote/file + delegate_to: other_system +''' + +import atexit +import base64 +import httplib +import urllib +import mmap +import errno +import socket + +def vmware_path(datastore, datacenter, path): + ''' Constructs a URL path that VSphere accepts reliably ''' + path = "/folder/%s" % path.lstrip("/") + if not path.startswith("/"): + path = "/" + path + params = dict( dsName = datastore ) + if datacenter: + params["dcPath"] = datacenter + params = urllib.urlencode(params) + return "%s?%s" % (path, params) + +def main(): + + module = AnsibleModule( + argument_spec = dict( + host = dict(required=True, aliases=[ 'hostname' ]), + login = dict(required=True, aliases=[ 'username' ]), + password = dict(required=True), + src = dict(required=True, aliases=[ 'name' ]), + datacenter = dict(required=True), + datastore = dict(required=True), + dest = dict(required=True, aliases=[ 'path' ]), + ), + # Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable + supports_check_mode = False, + ) + + host = module.params.get('host') + login = module.params.get('login') + password = module.params.get('password') + src = module.params.get('src') + datacenter = module.params.get('datacenter') + datastore = module.params.get('datastore') + dest = module.params.get('dest') + + fd = open(src, "rb") + atexit.register(fd.close) + + data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ) + atexit.register(data.close) + + conn = httplib.HTTPSConnection(host) + atexit.register(conn.close) + + remote_path = vmware_path(datastore, datacenter, dest) + auth = base64.encodestring('%s:%s' % (login, password)) + headers = { + "Content-Type": "application/octet-stream", + "Content-Length": str(len(data)), + "Accept": "text/plain", + "Authorization": "Basic %s" % auth, + } + + # URL is only used in JSON output (helps troubleshooting) + url = 'https://%s%s' % (host, remote_path) + + try: + conn.request("PUT", remote_path, body=data, headers=headers) + except socket.error, e: + if isinstance(e.args, tuple) and e[0] == errno.ECONNRESET: + # VSphere resets connection if the file is in use and cannot be replaced + module.fail_json(msg='Failed to upload, image probably in use', status=e[0], reason=str(e), url=url) + else: + module.fail_json(msg=str(e), status=e[0], reason=str(e), url=url) + + resp = conn.getresponse() + + if resp.status in range(200, 300): + module.exit_json(changed=True, status=resp.status, reason=resp.reason, url=url) + else: + module.fail_json(msg='Failed to upload', status=resp.status, reason=resp.reason, length=resp.length, version=resp.version, headers=resp.getheaders(), chunked=resp.chunked, url=url) + +# this is magic, see lib/ansible/module_common.py +#<> +main() From f967aa376d583e745a371987414585a359abe25d Mon Sep 17 00:00:00 2001 From: Dag Wieers Date: Wed, 17 Jun 2015 15:07:18 +0200 Subject: [PATCH 674/720] Fix TravisCI failure on python 2.4 --- system/osx_defaults.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/system/osx_defaults.py b/system/osx_defaults.py index 0dd7ca8ff6b..7e2fe38ad77 100644 --- a/system/osx_defaults.py +++ b/system/osx_defaults.py @@ -209,7 +209,10 @@ class OSXDefaults(object): # We 
need to convert some values so the defaults commandline understands it if type(self.value) is bool: - value = "TRUE" if self.value else "FALSE" + if self.value: + value = "TRUE" + else: + value = "FALSE" elif type(self.value) is int or type(self.value) is float: value = str(self.value) elif self.array_add and self.current_value is not None: From 685653b23b9d455507e6fa472de7cd3b8c03ee6c Mon Sep 17 00:00:00 2001 From: Dag Wieers Date: Wed, 17 Jun 2015 15:22:10 +0200 Subject: [PATCH 675/720] Another incompatibility with python 2.4 --- system/osx_defaults.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/osx_defaults.py b/system/osx_defaults.py index 7e2fe38ad77..e5d2bc51731 100644 --- a/system/osx_defaults.py +++ b/system/osx_defaults.py @@ -343,7 +343,7 @@ def main(): array_add=array_add, value=value, state=state, path=path) changed = defaults.run() module.exit_json(changed=changed) - except OSXDefaultsException as e: + except OSXDefaultsException, e: module.fail_json(msg=e.message) # /main ------------------------------------------------------------------- }}} From 8753b2cd208c94d7e4a003462e6720ac3b0965cd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 19:19:16 -0400 Subject: [PATCH 676/720] minor docfixes --- system/osx_defaults.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/system/osx_defaults.py b/system/osx_defaults.py index e5d2bc51731..e4dc5f8c750 100644 --- a/system/osx_defaults.py +++ b/system/osx_defaults.py @@ -19,14 +19,14 @@ DOCUMENTATION = ''' --- module: osx_defaults -author: Franck Nijhof +author: Franck Nijhof (@frenck) short_description: osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible description: - osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible scripts. Mac OS X applications and other programs use the defaults system to record user preferences and other information that must be maintained when the applications aren't running (such as default font for new documents, or the position of an Info panel). -version_added: 1.8 +version_added: "2.0" options: domain: description: @@ -47,7 +47,7 @@ options: description: - Add new elements to the array for a key which has an array as its value. required: false - default: string + default: false choices: [ "true", "false" ] value: description: From 9db032aa118425cde4db904b0e8efac5ed07735b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Jun 2015 09:42:18 -0400 Subject: [PATCH 677/720] minor doc update --- cloud/vmware/vsphere_copy | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/vmware/vsphere_copy b/cloud/vmware/vsphere_copy index f5f12f83555..f85beab481d 100644 --- a/cloud/vmware/vsphere_copy +++ b/cloud/vmware/vsphere_copy @@ -24,7 +24,7 @@ module: vsphere_copy short_description: Copy a file to a vCenter datastore description: Upload files to a vCenter datastore version_added: 2.0 -author: Dag Wieers +author: Dag Wieers (@dagwieers) options: host: description: @@ -55,8 +55,8 @@ options: - The file to push to the datastore on the vCenter server. required: true notes: - - This module ought to be run from a system that can access vCenter directly. - Either by using C(transport: local), or using C(delegate_to). + - This module ought to be run from a system that can access vCenter directly and has the file to transfer. + It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to). 
- Tested on vSphere 5.5 ''' From 656e1a6deb965dbc25a8e3a4f7afc4ee7ac22814 Mon Sep 17 00:00:00 2001 From: Gerrit Germis Date: Wed, 17 Jun 2015 17:29:38 +0200 Subject: [PATCH 678/720] allow wait, wait_retries and wait_interval parameters for haproxy module. This allows the haproxy to wait for status "UP" when state=enabled and status "MAINT" when state=disabled --- network/haproxy.py | 72 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 69 insertions(+), 3 deletions(-) diff --git a/network/haproxy.py b/network/haproxy.py index 00fc4ff63a1..64059cbdf1c 100644 --- a/network/haproxy.py +++ b/network/haproxy.py @@ -68,6 +68,21 @@ options: - When disabling server, immediately terminate all the sessions attached to the specified server. This can be used to terminate long-running sessions after a server is put into maintenance mode, for instance. required: false default: false + wait: + description: + - Wait until the server reports a status of 'UP' when state=enabled, or status of 'MAINT' when state=disabled + required: false + default: false + wait_retries: + description: + - number of times to check for status after changing the state + required: false + default: 20 + wait_interval: + description: + - number of seconds to wait between retries + required: false + default: 1 ''' EXAMPLES = ''' @@ -82,12 +97,21 @@ examples: # disable server, provide socket file - haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www +# disable server, provide socket file, wait until status reports in maintenance +- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www wait=yes + # disable backend server in 'www' backend pool and drop open sessions to it - haproxy: state=disabled host={{ inventory_hostname }} backend=www socket=/var/run/haproxy.sock shutdown_sessions=true # enable server in 'www' backend pool - haproxy: state=enabled host={{ inventory_hostname }} backend=www +# enable server in 'www' backend pool wait until healthy +- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes + +# enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health +- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes wait_retries=10 wait_interval=5 + # enable server in 'www' backend pool with change server(s) weight - haproxy: state=enabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock weight=10 backend=www @@ -95,11 +119,15 @@ author: "Ravi Bhure (@ravibhure)" ''' import socket +import csv +import time DEFAULT_SOCKET_LOCATION="/var/run/haproxy.sock" RECV_SIZE = 1024 ACTION_CHOICES = ['enabled', 'disabled'] +WAIT_RETRIES=20 +WAIT_INTERVAL=1 ###################################################################### class TimeoutException(Exception): @@ -126,10 +154,12 @@ class HAProxy(object): self.weight = self.module.params['weight'] self.socket = self.module.params['socket'] self.shutdown_sessions = self.module.params['shutdown_sessions'] - + self.wait = self.module.params['wait'] + self.wait_retries = self.module.params['wait_retries'] + self.wait_interval = self.module.params['wait_interval'] self.command_results = [] - def execute(self, cmd, timeout=200): + def execute(self, cmd, timeout=200, capture_output=True): """ Executes a HAProxy command by sending a message to a HAProxy's local UNIX socket and waiting up to 'timeout' milliseconds for the response. 
@@ -144,10 +174,35 @@ class HAProxy(object): while buf: result += buf buf = self.client.recv(RECV_SIZE) - self.command_results = result.strip() + if capture_output: + self.command_results = result.strip() self.client.close() return result + def wait_until_status(self, pxname, svname, status): + """ + Wait for a server to become active (status == 'UP'). Try RETRIES times + with INTERVAL seconds of sleep in between. If the service has not reached + the expected status in that time, the module will fail. If the service was + not found, the module will fail. + """ + for i in range(1, self.wait_retries): + data = self.execute('show stat', 200, False).lstrip('# ') + r = csv.DictReader(data.splitlines()) + found = False + for row in r: + if row['pxname'] == pxname and row['svname'] == svname: + found = True + if row['status'] == status: + return True; + else: + time.sleep(self.wait_interval) + + if not found: + self.module.fail_json(msg="unable to find server %s/%s" % (pxname, svname)) + + self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." % (pxname, svname, status, self.wait_retries)) + def enabled(self, host, backend, weight): """ Enabled action, marks server to UP and checks are re-enabled, @@ -170,6 +225,8 @@ class HAProxy(object): if weight: cmd += "; set weight %s/%s %s" % (pxname, svname, weight) self.execute(cmd) + if self.wait: + self.wait_until_status(pxname, svname, 'UP') else: pxname = backend @@ -177,6 +234,8 @@ class HAProxy(object): if weight: cmd += "; set weight %s/%s %s" % (pxname, svname, weight) self.execute(cmd) + if self.wait: + self.wait_until_status(pxname, svname, 'UP') def disabled(self, host, backend, shutdown_sessions): """ @@ -200,6 +259,8 @@ class HAProxy(object): if shutdown_sessions: cmd += "; shutdown sessions server %s/%s" % (pxname, svname) self.execute(cmd) + if self.wait: + self.wait_until_status(pxname, svname, 'MAINT') else: pxname = backend @@ -207,6 +268,8 @@ class HAProxy(object): if shutdown_sessions: cmd += "; shutdown sessions server %s/%s" % (pxname, svname) self.execute(cmd) + if self.wait: + self.wait_until_status(pxname, svname, 'MAINT') def act(self): """ @@ -236,6 +299,9 @@ def main(): weight=dict(required=False, default=None), socket = dict(required=False, default=DEFAULT_SOCKET_LOCATION), shutdown_sessions=dict(required=False, default=False), + wait=dict(required=False, default=False), + wait_retries=dict(required=False, default=WAIT_RETRIES), + wait_interval=dict(required=False, default=WAIT_INTERVAL), ), ) From 5a1109229d6cb0c352e75866e1b2ace47ff24d17 Mon Sep 17 00:00:00 2001 From: Gerrit Germis Date: Thu, 18 Jun 2015 09:11:16 +0200 Subject: [PATCH 679/720] added version_added: "2.0" to new parameters --- network/haproxy.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/network/haproxy.py b/network/haproxy.py index 64059cbdf1c..690aa60bbba 100644 --- a/network/haproxy.py +++ b/network/haproxy.py @@ -73,16 +73,19 @@ options: - Wait until the server reports a status of 'UP' when state=enabled, or status of 'MAINT' when state=disabled required: false default: false + version_added: "2.0" wait_retries: description: - number of times to check for status after changing the state required: false default: 20 + version_added: "2.0" wait_interval: description: - number of seconds to wait between retries required: false default: 1 + version_added: "2.0" ''' EXAMPLES = ''' @@ -181,7 +184,7 @@ class HAProxy(object): def wait_until_status(self, pxname, svname, status): """ - Wait for a server to 
become active (status == 'UP'). Try RETRIES times + Wait for a service to reach the specified status. Try RETRIES times with INTERVAL seconds of sleep in between. If the service has not reached the expected status in that time, the module will fail. If the service was not found, the module will fail. From 1b0676b559eb0dafb6dba6fe0502903821e0a701 Mon Sep 17 00:00:00 2001 From: "Dustin C. Hatch" Date: Wed, 17 Jun 2015 16:12:58 -0500 Subject: [PATCH 680/720] packaging/os/portage: Improve check mode handling When running in check mode, the *portage* module always reports that no changes were made, even if the requested packages do not exist on the system. This is because it was erroneously expecting `emerge --pretend` to produce the same output as `emerge` by itself would, and attempts to parse it. This is not correct, for several reasons. Most specifically, the string for which it is searching does not exist in the pretend output. Additionally, `emerge --pretend` always prints the requested packages, whether they are already installed or not; in the former case, it shows them as reinstalls. This commit adjusts the behavior to rely on `equery` alone when running in check mode. If `equery` reports at least one package is not installed, then nothing else is done: the system will definitely be changed. Signed-off-by: Dustin C. Hatch --- packaging/os/portage.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index 2ce0379a8ec..712881a91ea 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -254,6 +254,8 @@ def emerge_packages(module, packages): break else: module.exit_json(changed=False, msg='Packages already present.') + if module.check_mode: + module.exit_json(changed=True, msg='Packages would be installed.') args = [] emerge_flags = { From e3d608297d95a7c04d54303ee0abd6fda64dcde1 Mon Sep 17 00:00:00 2001 From: "Dustin C. Hatch" Date: Thu, 18 Jun 2015 13:55:03 -0500 Subject: [PATCH 681/720] packaging/os/portage: Handle noreplace in check mode The `--noreplace` argument to `emerge` is generally coupled with `--newuse` or `--changed-use`, and can be used instruct Portage to rebuild a package only if necessary. Simply checking to see if the package is already installed using `equery` is not sufficient to determine if any changes would be made, so that step is skipped when the `noreplace` module argument is specified. The module then falls back to parsing the output from `emerge` to determine if anything changed. In check mode, `emerge` is called with `--pretend`, so it produces different output, and the parsing fails to correctly infer that a change would be made. This commit adds another regular expression to check when running in check mode that matches the pretend output from `emerge`. Signed-off-by: Dustin C. Hatch --- packaging/os/portage.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index 712881a91ea..79db8d74740 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -300,13 +300,18 @@ def emerge_packages(module, packages): changed = True for line in out.splitlines(): if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line): + msg = 'Packages installed.' + break + elif module.check_mode and re.match(r'\[(binary|ebuild)', line): + msg = 'Packages would be installed.' break else: changed = False + msg = 'No packages installed.' 
module.exit_json( changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages installed.', + msg=msg, ) From 5e5eec1806e406127484e18492f4c1d6b45a6341 Mon Sep 17 00:00:00 2001 From: Andrew Udvare Date: Thu, 18 Jun 2015 15:59:46 -0700 Subject: [PATCH 682/720] --usepkgonly does not imply --getbinpkg Add usepkg option to allow conditional building from source if binary packages are not found https://github.com/ansible/ansible-modules-extras/commit/5a6de937cb053d8366e06c01ec59b37c22d0629c#commitcomment-11755140 https://wiki.gentoo.org/wiki/Binary_package_guide#Using_binary_packages --- packaging/os/portage.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index ab96cb22e60..e62b0983033 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -267,14 +267,14 @@ def emerge_packages(module, packages): 'verbose': '--verbose', 'getbinpkg': '--getbinpkg', 'usepkgonly': '--usepkgonly', + 'usepkg': '--usepkg', } for flag, arg in emerge_flags.iteritems(): if p[flag]: args.append(arg) - # usepkgonly implies getbinpkg - if p['usepkgonly'] and not p['getbinpkg']: - args.append('--getbinpkg') + if 'usepkg' in p and 'usepkgonly' in p: + module.fail_json(msg='Use only one of usepkg, usepkgonly') cmd, (rc, out, err) = run_emerge(module, packages, *args) if rc != 0: @@ -406,6 +406,7 @@ def main(): sync=dict(default=None, choices=['yes', 'web']), getbinpkg=dict(default=None, choices=['yes']), usepkgonly=dict(default=None, choices=['yes']), + usepkg=dict(default=None, choices=['yes']), ), required_one_of=[['package', 'sync', 'depclean']], mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']], From 6b8c462d6605341318279a9ab11cc6843642e230 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Fri, 19 Jun 2015 12:40:56 +1000 Subject: [PATCH 683/720] Add GUIDELINES for AWS module development Starting point for a reference when doing pull request reviews. If something doesn't meet the guidelines we can point people at them. If something is bad but is not mentioned in the guidelines, we should add it here. --- cloud/amazon/GUIDELINES.md | 88 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 cloud/amazon/GUIDELINES.md diff --git a/cloud/amazon/GUIDELINES.md b/cloud/amazon/GUIDELINES.md new file mode 100644 index 00000000000..ee5aea90ef7 --- /dev/null +++ b/cloud/amazon/GUIDELINES.md @@ -0,0 +1,88 @@ +Guidelines for AWS modules +-------------------------- + +Naming your module +================== + +Base the name of the module on the part of AWS that +you actually use. (A good rule of thumb is to take +whatever module you use with boto as a starting point). + +Don't further abbreviate names - if something is a well +known abbreviation due to it being a major component of +AWS, that's fine, but don't create new ones independently +(e.g. VPC, ELB, etc. are fine) + +Using boto +========== + +Wrap the `import` statements in a try block and fail the +module later on if the import fails + +``` +try: + import boto + import boto.module.that.you.use + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + module_specific_parameter=dict(), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') +``` + + +Try and keep backward compatibility with relatively recent +versions of boto. 
That means that if want to implement some +functionality that uses a new feature of boto, it should only +fail if that feature actually needs to be run, with a message +saying which version of boto is needed. + +Use feature testing (e.g. `hasattr('boto.module', 'shiny_new_method')`) +to check whether boto supports a feature rather than version checking + +e.g. from the `ec2` module: +``` +if boto_supports_profile_name_arg(ec2): + params['instance_profile_name'] = instance_profile_name +else: + if instance_profile_name is not None: + module.fail_json( + msg="instance_profile_name parameter requires Boto version 2.5.0 or higher") +``` + + +Connecting to AWS +================= + +For EC2 you can just use + +``` +ec2 = ec2_connect(module) +``` + +For other modules, you should use `get_aws_connection_info` and then +`connect_to_aws`. To connect to an example `xyz` service: + +``` +region, ec2_url, aws_connect_params = get_aws_connection_info(module) +xyz = connect_to_aws(boto.xyz, region, **aws_connect_params) +``` + +The reason for using `get_aws_connection_info` and `connect_to_aws` +(and even `ec2_connect` uses those under the hood) rather than doing it +yourself is that they handle some of the more esoteric connection +options such as security tokens and boto profiles. From 628f2b98b69dba0fa741c87ddcd7c45108311509 Mon Sep 17 00:00:00 2001 From: Amir Moulavi Date: Fri, 19 Jun 2015 09:12:08 +0200 Subject: [PATCH 684/720] Implementation of EC2 AMI copy between regions --- cloud/amazon/ec2_ami_copy.py | 211 +++++++++++++++++++++++++++++++++++ 1 file changed, 211 insertions(+) create mode 100644 cloud/amazon/ec2_ami_copy.py diff --git a/cloud/amazon/ec2_ami_copy.py b/cloud/amazon/ec2_ami_copy.py new file mode 100644 index 00000000000..909ec4a9c7a --- /dev/null +++ b/cloud/amazon/ec2_ami_copy.py @@ -0,0 +1,211 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_ami_copy +short_description: copies AMI between AWS regions, return new image id +description: + - Copies AMI from a source region to a destination region. This module has a dependency on python-boto >= 2.5 +version_added: "1.7" +options: + source_region: + description: + - the source region that AMI should be copied from + required: true + default: null + region: + description: + - the destination region that AMI should be copied to + required: true + default: null + aliases: ['aws_region', 'ec2_region', 'dest_region'] + source_image_id: + description: + - the id of the image in source region that should be copied + required: true + default: null + name: + description: + - The name of the new image to copy + required: false + default: null + description: + description: + - An optional human-readable string describing the contents and purpose of the new AMI. + required: false + default: null + wait: + description: + - wait for the copied AMI to be in state 'available' before returning. 
+ required: false + default: "no" + choices: [ "yes", "no" ] + wait_timeout: + description: + - how long before wait gives up, in seconds + required: false + default: 1200 + tags: + description: + - a hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}' + required: false + default: null + +author: Amir Moulavi +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Basic AMI Copy +- local_action: + module: ec2_ami_copy + source_region: eu-west-1 + dest_region: us-east-1 + source_image_id: ami-xxxxxxx + name: SuperService-new-AMI + description: latest patch + tags: '{"Name":"SuperService-new-AMI", "type":"SuperService"}' + wait: yes + register: image_id +''' + + +import sys +import time + +try: + import boto + import boto.ec2 + from boto.vpc import VPCConnection + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + +def copy_image(module, ec2): + """ + Copies an AMI + + module : AnsibleModule object + ec2: authenticated ec2 connection object + """ + + source_region = module.params.get('source_region') + source_image_id = module.params.get('source_image_id') + name = module.params.get('name') + description = module.params.get('description') + tags = module.params.get('tags') + wait_timeout = int(module.params.get('wait_timeout')) + wait = module.params.get('wait') + + try: + params = {'source_region': source_region, + 'source_image_id': source_image_id, + 'name': name, + 'description': description + } + + image_id = ec2.copy_image(**params).image_id + except boto.exception.BotoServerError, e: + module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) + + img = wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait) + + img = wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait) + + register_tags_if_any(module, ec2, tags, image_id) + + module.exit_json(msg="AMI copy operation complete", image_id=image_id, state=img.state, changed=True) + + +# register tags to the copied AMI in dest_region +def register_tags_if_any(module, ec2, tags, image_id): + if tags: + try: + ec2.create_tags([image_id], tags) + except Exception as e: + module.fail_json(msg=str(e)) + + +# wait here until the image is copied (i.e. the state becomes available +def wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait): + wait_timeout = time.time() + wait_timeout + while wait and wait_timeout > time.time() and (img is None or img.state != 'available'): + img = ec2.get_image(image_id) + time.sleep(3) + if wait and wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="timed out waiting for image to be copied") + return img + + +# wait until the image is recognized. +def wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait): + for i in range(wait_timeout): + try: + return ec2.get_image(image_id) + except boto.exception.EC2ResponseError, e: + # This exception we expect initially right after registering the copy with EC2 API + if 'InvalidAMIID.NotFound' in e.error_code and wait: + time.sleep(1) + else: + # On any other exception we should fail + module.fail_json( + msg="Error while trying to find the new image. 
Using wait=yes and/or a longer wait_timeout may help: " + str( + e)) + else: + module.fail_json(msg="timed out waiting for image to be recognized") + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + source_region=dict(required=True), + source_image_id=dict(required=True), + name=dict(), + description=dict(default=""), + wait=dict(type='bool', default=False), + wait_timeout=dict(default=1200), + tags=dict(type='dict'))) + + module = AnsibleModule(argument_spec=argument_spec) + + try: + ec2 = ec2_connect(module) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + try: + region, ec2_url, boto_params = get_aws_connection_info(module) + vpc = connect_to_aws(boto.vpc, region, **boto_params) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + + if not region: + module.fail_json(msg="region must be specified") + + copy_image(module, ec2) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() + From 3f3a73da37c0c8e8425b2c41e7b9ee18f2851656 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Fri, 16 Jan 2015 15:59:17 +0100 Subject: [PATCH 685/720] Add sensu_check module --- monitoring/sensu_check.py | 328 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 328 insertions(+) create mode 100644 monitoring/sensu_check.py diff --git a/monitoring/sensu_check.py b/monitoring/sensu_check.py new file mode 100644 index 00000000000..b968304c34f --- /dev/null +++ b/monitoring/sensu_check.py @@ -0,0 +1,328 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Anders Ingemann +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: sensu_check +short_description: Manage Sensu checks +version_added: 2.0 +description: + - Manage the checks that should be run on a machine by I(Sensu). + - Most options do not have a default and will not be added to the check definition unless specified. + - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module, + - they are simply specified for your convenience. +options: + name: + description: + - The name of the check + - This is the key that is used to determine whether a check exists + required: true + state: + description: Whether the check should be present or not + choices: [ 'present', 'absent' ] + required: false + default: present + path: + description: + - Path to the json file of the check to be added/removed. + - Will be created if it does not exist (unless I(state=absent)). + - The parent folders need to exist when I(state=present), otherwise an error will be thrown + required: false + default: /etc/sensu/conf.d/checks.json + backup: + description: + - Create a backup file (if yes), including the timestamp information so + - you can get the original file back if you somehow clobbered it incorrectly. 
+ choices: [ 'yes', 'no' ] + required: false + default: no + command: + description: + - Path to the sensu check to run (not required when I(state=absent)) + required: true + handlers: + description: + - List of handlers to notify when the check fails + required: false + default: [] + subscribers: + description: + - List of subscribers/channels this check should run for + - See sensu_subscribers to subscribe a machine to a channel + required: false + default: [] + interval: + description: + - Check interval in seconds + required: false + default: null + timeout: + description: + - Timeout for the check + required: false + default: 10 + handle: + description: + - Whether the check should be handled or not + choices: [ 'yes', 'no' ] + required: false + default: yes + subdue_begin: + description: + - When to disable handling of check failures + required: false + default: null + subdue_end: + description: + - When to enable handling of check failures + required: false + default: null + dependencies: + description: + - Other checks this check depends on, if dependencies fail, + - handling of this check will be disabled + required: false + default: [] + metric: + description: Whether the check is a metric + choices: [ 'yes', 'no' ] + required: false + default: no + standalone: + description: + - Whether the check should be scheduled by the sensu client or server + - This option obviates the need for specifying the I(subscribers) option + choices: [ 'yes', 'no' ] + required: false + default: no + publish: + description: + - Whether the check should be scheduled at all. + - You can still issue it via the sensu api + choices: [ 'yes', 'no' ] + required: false + default: yes + occurrences: + description: + - Number of event occurrences before the handler should take action + required: false + default: 1 + refresh: + description: + - Number of seconds handlers should wait before taking second action + required: false + default: null + aggregate: + description: + - Classifies the check as an aggregate check, + - making it available via the aggregate API + choices: [ 'yes', 'no' ] + required: false + default: no + low_flap_threshold: + description: + - The low threshhold for flap detection + required: false + default: null + high_flap_threshold: + description: + - The low threshhold for flap detection + required: false + default: null +requirements: [ ] +author: Anders Ingemann +''' + +EXAMPLES = ''' +# Fetch metrics about the CPU load every 60 seconds, +# the sensu server has a handler called 'relay' which forwards stats to graphite +- name: get cpu metrics + sensu_check: name=cpu_load + command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb + metric=yes handlers=relay subscribers=common interval=60 + +# Check whether nginx is running +- name: check nginx process + sensu_check: name=nginx_running + command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid' + handlers=default subscribers=nginx interval=60 + +# Stop monitoring the disk capacity. +# Note that the check will still show up in the sensu dashboard, +# to remove it completely you need to issue a DELETE request to the sensu api. 
+- name: check disk + sensu_check: name=check_disk_capacity +''' + + +def sensu_check(module, path, name, state='present', backup=False): + changed = False + reasons = [] + + try: + import json + except ImportError: + import simplejson as json + + try: + with open(path) as stream: + config = json.load(stream) + except IOError as e: + if e.errno is 2: # File not found, non-fatal + if state == 'absent': + reasons.append('file did not exist and state is `absent\'') + return changed, reasons + config = {} + else: + module.fail_json(msg=str(e)) + except ValueError: + msg = '{path} contains invalid JSON'.format(path=path) + module.fail_json(msg=msg) + + if 'checks' not in config: + if state == 'absent': + reasons.append('`checks\' section did not exist and state is `absent\'') + return changed, reasons + config['checks'] = {} + changed = True + reasons.append('`checks\' section did not exist') + + if state == 'absent': + if name in config['checks']: + del config['checks'][name] + changed = True + reasons.append('check was present and state is `absent\'') + + if state == 'present': + if name not in config['checks']: + check = {} + config['checks'][name] = check + changed = True + reasons.append('check was absent and state is `present\'') + else: + check = config['checks'][name] + simple_opts = ['command', + 'handlers', + 'subscribers', + 'interval', + 'timeout', + 'handle', + 'dependencies', + 'standalone', + 'publish', + 'occurrences', + 'refresh', + 'aggregate', + 'low_flap_threshold', + 'high_flap_threshold', + ] + for opt in simple_opts: + if module.params[opt] is not None: + if opt not in check or check[opt] != module.params[opt]: + check[opt] = module.params[opt] + changed = True + reasons.append('`{opt}\' did not exist or was different'.format(opt=opt)) + else: + if opt in check: + del check[opt] + changed = True + reasons.append('`{opt}\' was removed'.format(opt=opt)) + + if module.params['metric']: + if 'type' not in check or check['type'] != 'metric': + check['type'] = 'metric' + changed = True + reasons.append('`type\' was not defined or not `metric\'') + if not module.params['metric'] and 'type' in check: + del check['type'] + changed = True + reasons.append('`type\' was defined') + + if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None: + subdue = {'begin': module.params['subdue_begin'], + 'end': module.params['subdue_end'], + } + if 'subdue' not in check or check['subdue'] != subdue: + check['subdue'] = subdue + changed = True + reasons.append('`subdue\' did not exist or was different') + else: + if 'subdue' in check: + del check['subdue'] + changed = True + reasons.append('`subdue\' was removed') + + if changed and not module.check_mode: + if backup: + module.backup_local(path) + try: + with open(path, 'w') as stream: + stream.write(json.dumps(config, indent=2) + '\n') + except IOError as e: + module.fail_json(msg=str(e)) + + return changed, reasons + + +def main(): + + arg_spec = {'name': {'type': 'str', 'required': True}, + 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'}, + 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, + 'backup': {'type': 'bool', 'default': 'no'}, + 'command': {'type': 'str'}, + 'handlers': {'type': 'list'}, + 'subscribers': {'type': 'list'}, + 'interval': {'type': 'int'}, + 'timeout': {'type': 'int'}, + 'handle': {'type': 'bool'}, + 'subdue_begin': {'type': 'str'}, + 'subdue_end': {'type': 'str'}, + 'dependencies': {'type': 'list'}, + 'metric': {'type': 'bool', 
'default': 'no'}, + 'standalone': {'type': 'bool'}, + 'publish': {'type': 'bool'}, + 'occurrences': {'type': 'int'}, + 'refresh': {'type': 'int'}, + 'aggregate': {'type': 'bool'}, + 'low_flap_threshold': {'type': 'int'}, + 'high_flap_threshold': {'type': 'int'}, + } + + required_together = [['subdue_begin', 'subdue_end']] + + module = AnsibleModule(argument_spec=arg_spec, + required_together=required_together, + supports_check_mode=True) + if module.params['state'] != 'absent' and module.params['command'] is None: + module.fail_json(msg="missing required arguments: %s" % ",".join(['command'])) + + path = module.params['path'] + name = module.params['name'] + state = module.params['state'] + backup = module.params['backup'] + + changed, reasons = sensu_check(module, path, name, state, backup) + + module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons) + +from ansible.module_utils.basic import * +main() From 35b6bc417d6b825189486a094b833c226ca30bb9 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 19 Jun 2015 11:55:05 +0200 Subject: [PATCH 686/720] cloudstack: new module cs_facts --- cloud/cloudstack/cs_facts.py | 221 +++++++++++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) create mode 100644 cloud/cloudstack/cs_facts.py diff --git a/cloud/cloudstack/cs_facts.py b/cloud/cloudstack/cs_facts.py new file mode 100644 index 00000000000..f8749834120 --- /dev/null +++ b/cloud/cloudstack/cs_facts.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_facts +short_description: Gather facts on instances of Apache CloudStack based clouds. +description: + - This module fetches data from the metadata API in CloudStack. The module must be called from within the instance itself. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + filter: + description: + - Filter for a specific fact. + required: false + default: null + choices: + - cloudstack_service_offering + - cloudstack_availability_zone + - cloudstack_public_hostname + - cloudstack_public_ipv4 + - cloudstack_local_hostname + - cloudstack_local_ipv4 + - cloudstack_instance_id + - cloudstack_user_data +requirements: [ 'yaml' ] +''' + +EXAMPLES = ''' +# Gather all facts on instances +- name: Gather cloudstack facts + cs_facts: + +# Gather specific fact on instances +- name: Gather cloudstack facts + cs_facts: filter=cloudstack_instance_id +''' + +RETURN = ''' +--- +cloudstack_availability_zone: + description: zone the instance is deployed in. + returned: success + type: string + sample: ch-gva-2 +cloudstack_instance_id: + description: UUID of the instance. + returned: success + type: string + sample: ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 +cloudstack_local_hostname: + description: local hostname of the instance. 
+ returned: success + type: string + sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 +cloudstack_local_ipv4: + description: local IPv4 of the instance. + returned: success + type: string + sample: 185.19.28.35 +cloudstack_public_hostname: + description: public hostname of the instance. + returned: success + type: string + sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 +cloudstack_public_ipv4: + description: public IPv4 of the instance. + returned: success + type: string + sample: 185.19.28.35 +cloudstack_service_offering: + description: service offering of the instance. + returned: success + type: string + sample: Micro 512mb 1cpu +cloudstack_user_data: + description: data of the instance provided by users. + returned: success + type: dict + sample: { "bla": "foo" } +''' + +import os + +try: + import yaml + has_lib_yaml = True +except ImportError: + has_lib_yaml = False + +CS_METADATA_BASE_URL = "http://%s/latest/meta-data" +CS_USERDATA_BASE_URL = "http://%s/latest/user-data" + +class CloudStackFacts(object): + + def __init__(self): + self.facts = ansible_facts(module) + self.api_ip = None + self.fact_paths = { + 'cloudstack_service_offering': 'service-offering', + 'cloudstack_availability_zone': 'availability-zone', + 'cloudstack_public_hostname': 'public-hostname', + 'cloudstack_public_ipv4': 'public-ipv4', + 'cloudstack_local_hostname': 'local-hostname', + 'cloudstack_local_ipv4': 'local-ipv4', + 'cloudstack_instance_id': 'instance-id' + } + + def run(self): + result = {} + filter = module.params.get('filter') + if not filter: + for key,path in self.fact_paths.iteritems(): + result[key] = self._fetch(CS_METADATA_BASE_URL + "/" + path) + result['cloudstack_user_data'] = self._get_user_data_json() + else: + if filter == 'cloudstack_user_data': + result['cloudstack_user_data'] = self._get_user_data_json() + elif filter in self.fact_paths: + result[filter] = self._fetch(CS_METADATA_BASE_URL + "/" + self.fact_paths[filter]) + return result + + + def _get_user_data_json(self): + try: + # this data come form users, we try what we can to parse it... 
+ return yaml.load(self._fetch(CS_USERDATA_BASE_URL)) + except: + return None + + + def _fetch(self, path): + api_ip = self._get_api_ip() + if not api_ip: + return None + api_url = path % api_ip + (response, info) = fetch_url(module, api_url, force=True) + if response: + data = response.read() + else: + data = None + return data + + + def _get_dhcp_lease_file(self): + """Return the path of the lease file.""" + default_iface = self.facts['default_ipv4']['interface'] + dhcp_lease_file_locations = [ + '/var/lib/dhcp/dhclient.%s.leases' % default_iface, # debian / ubuntu + '/var/lib/dhclient/dhclient-%s.leases' % default_iface, # centos 6 + '/var/lib/dhclient/dhclient--%s.lease' % default_iface, # centos 7 + '/var/db/dhclient.leases.%s' % default_iface, # openbsd + ] + for file_path in dhcp_lease_file_locations: + if os.path.exists(file_path): + return file_path + module.fail_json(msg="Could not find dhclient leases file.") + + + def _get_api_ip(self): + """Return the IP of the DHCP server.""" + if not self.api_ip: + dhcp_lease_file = self._get_dhcp_lease_file() + for line in open(dhcp_lease_file): + if 'dhcp-server-identifier' in line: + # get IP of string "option dhcp-server-identifier 185.19.28.176;" + line = line.translate(None, ';') + self.api_ip = line.split()[2] + break + if not self.api_ip: + module.fail_json(msg="No dhcp-server-identifier found in leases file.") + return self.api_ip + + +def main(): + global module + module = AnsibleModule( + argument_spec = dict( + filter = dict(default=None, choices=[ + 'cloudstack_service_offering', + 'cloudstack_availability_zone', + 'cloudstack_public_hostname', + 'cloudstack_public_ipv4', + 'cloudstack_local_hostname', + 'cloudstack_local_ipv4', + 'cloudstack_instance_id', + 'cloudstack_user_data', + ]), + ), + supports_check_mode=False + ) + + if not has_lib_yaml: + module.fail_json(msg="missing python library: yaml") + + cs_facts = CloudStackFacts().run() + cs_facts_result = dict(changed=False, ansible_facts=cs_facts) + module.exit_json(**cs_facts_result) + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +from ansible.module_utils.facts import * +main() From d0cf9617a54a49ecf819076555cce931a0f71683 Mon Sep 17 00:00:00 2001 From: Dag Wieers Date: Fri, 19 Jun 2015 13:30:29 +0200 Subject: [PATCH 687/720] Spurious newline could corrupt payload Due to a spurious newline we corrupted the payload. It depends on the order of the headers and if there were headers added by vSphere. The Accept header was also not needed. 
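For context, a minimal illustrative sketch of why the trailing newline matters (this is not part of the patch; 'user' and 'secret' are placeholder credentials): base64.encodestring() terminates its output with a newline, so splicing the raw result into the Authorization header injects an extra line break into the request headers, and rstrip() removes it before the value is used.

    import base64

    auth = base64.encodestring('%s:%s' % ('user', 'secret'))
    # encodestring() always appends '\n'; embedded in a header value this
    # prematurely ends the header line and can corrupt what follows it.
    assert auth.endswith('\n')

    auth = auth.rstrip()                       # drop the spurious newline
    header = "Authorization: Basic %s" % auth  # now a single, clean header line
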
--- cloud/vmware/vsphere_copy | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_copy b/cloud/vmware/vsphere_copy index f85beab481d..0ca9780c008 100644 --- a/cloud/vmware/vsphere_copy +++ b/cloud/vmware/vsphere_copy @@ -120,11 +120,10 @@ def main(): atexit.register(conn.close) remote_path = vmware_path(datastore, datacenter, dest) - auth = base64.encodestring('%s:%s' % (login, password)) + auth = base64.encodestring('%s:%s' % (login, password)).rstrip() headers = { "Content-Type": "application/octet-stream", "Content-Length": str(len(data)), - "Accept": "text/plain", "Authorization": "Basic %s" % auth, } From e203087aaabea0c0cefe6ae3d1b072ecbde84cf8 Mon Sep 17 00:00:00 2001 From: Andrew Udvare Date: Fri, 19 Jun 2015 06:04:56 -0700 Subject: [PATCH 688/720] Fix comparison --- packaging/os/portage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index e62b0983033..1043679585b 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -273,7 +273,7 @@ def emerge_packages(module, packages): if p[flag]: args.append(arg) - if 'usepkg' in p and 'usepkgonly' in p: + if p['usepkg'] and p['usepkgonly']: module.fail_json(msg='Use only one of usepkg, usepkgonly') cmd, (rc, out, err) = run_emerge(module, packages, *args) From 35a4e70deef1860eb944bdc73d6d8ca19af0444d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Jun 2015 12:46:16 -0400 Subject: [PATCH 689/720] minor fixes --- notification/hall.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/notification/hall.py b/notification/hall.py index 7c76e52379f..05c1a981b73 100755 --- a/notification/hall.py +++ b/notification/hall.py @@ -18,18 +18,18 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . - + DOCUMENTATION = """ module: hall short_description: Send notification to Hall description: - - The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notication messages to rooms. -version_added: 1.6 -author: Billy Kimble + - "The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notication messages to rooms." +version_added: "2.0" +author: Billy Kimble (@bkimble) options: room_token: description: - - Room token provided to you by setting up the Ansible room integation on U(https://hall.com) + - "Room token provided to you by setting up the Ansible room integation on U(https://hall.com)" required: true msg: description: @@ -41,12 +41,12 @@ options: required: true picture: description: - - The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627) + - "The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)" required: false -""" +""" EXAMPLES = """ -- name: Send Hall notifiation +- name: Send Hall notifiation local_action: module: hall room_token: @@ -57,7 +57,7 @@ EXAMPLES = """ when: ec2.instances|length > 0 local_action: module: hall - room_token: + room_token: title: Server Creation msg: "Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region." 
with_items: ec2.instances @@ -66,7 +66,7 @@ EXAMPLES = """ HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s' def send_request_to_hall(module, room_token, payload): - headers = {'Content-Type': 'application/json'} + headers = {'Content-Type': 'application/json'} payload=module.jsonify(payload) api_endpoint = HALL_API_ENDPOINT % (room_token) response, info = fetch_url(module, api_endpoint, data=payload, headers=headers) @@ -83,7 +83,7 @@ def main(): picture = dict(type='str', default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'), ) ) - + room_token = module.params['room_token'] message = module.params['msg'] title = module.params['title'] From 1604382538db616867207bd1df1b05d893010213 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 11:04:25 -0400 Subject: [PATCH 690/720] monior docfixes added extensino to vsphere_copy so it actually installs --- cloud/amazon/ec2_ami_copy.py | 5 +---- cloud/amazon/ec2_eni.py | 6 +++--- cloud/amazon/ec2_eni_facts.py | 4 ++-- cloud/vmware/{vsphere_copy => vsphere_copy.py} | 4 ++-- 4 files changed, 8 insertions(+), 11 deletions(-) rename cloud/vmware/{vsphere_copy => vsphere_copy.py} (96%) diff --git a/cloud/amazon/ec2_ami_copy.py b/cloud/amazon/ec2_ami_copy.py index 909ec4a9c7a..ff9bde88022 100644 --- a/cloud/amazon/ec2_ami_copy.py +++ b/cloud/amazon/ec2_ami_copy.py @@ -20,24 +20,21 @@ module: ec2_ami_copy short_description: copies AMI between AWS regions, return new image id description: - Copies AMI from a source region to a destination region. This module has a dependency on python-boto >= 2.5 -version_added: "1.7" +version_added: "2.0" options: source_region: description: - the source region that AMI should be copied from required: true - default: null region: description: - the destination region that AMI should be copied to required: true - default: null aliases: ['aws_region', 'ec2_region', 'dest_region'] source_image_id: description: - the id of the image in source region that should be copied required: true - default: null name: description: - The name of the new image to copy diff --git a/cloud/amazon/ec2_eni.py b/cloud/amazon/ec2_eni.py index 2b34e9b9405..9e878e7d558 100644 --- a/cloud/amazon/ec2_eni.py +++ b/cloud/amazon/ec2_eni.py @@ -25,13 +25,13 @@ options: eni_id: description: - The ID of the ENI - required = false - default = null + required: false + default: null instance_id: description: - Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'. required: false - default: null + default: null private_ip_address: description: - Private IP address. diff --git a/cloud/amazon/ec2_eni_facts.py b/cloud/amazon/ec2_eni_facts.py index 76347c84261..981358c33af 100644 --- a/cloud/amazon/ec2_eni_facts.py +++ b/cloud/amazon/ec2_eni_facts.py @@ -25,8 +25,8 @@ options: eni_id: description: - The ID of the ENI. Pass this option to gather facts about a particular ENI, otherwise, all ENIs are returned. - required = false - default = null + required: false + default: null extends_documentation_fragment: aws ''' diff --git a/cloud/vmware/vsphere_copy b/cloud/vmware/vsphere_copy.py similarity index 96% rename from cloud/vmware/vsphere_copy rename to cloud/vmware/vsphere_copy.py index 0ca9780c008..7c044a7d51a 100644 --- a/cloud/vmware/vsphere_copy +++ b/cloud/vmware/vsphere_copy.py @@ -55,8 +55,8 @@ options: - The file to push to the datastore on the vCenter server. 
required: true notes: - - This module ought to be run from a system that can access vCenter directly and has the file to transfer. - It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to). + - "This module ought to be run from a system that can access vCenter directly and has the file to transfer. + It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)." - Tested on vSphere 5.5 ''' From 4b29146c4d84a94c35e9f1bd763fcb85820e801c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 19 Jun 2015 08:59:19 -0700 Subject: [PATCH 691/720] be explicit about urllib import and remove conditional urllib(2) import urllib and urllib2 have been in the python stdlib since at least python-2.3. There's no reason to conditionalize it. Fixes https://github.com/ansible/ansible/issues/11322 --- monitoring/airbrake_deployment.py | 5 +++-- monitoring/newrelic_deployment.py | 5 +++-- monitoring/rollbar_deployment.py | 1 + network/citrix/netscaler.py | 4 ++-- network/dnsmadeeasy.py | 4 +++- notification/flowdock.py | 5 +++-- notification/grove.py | 2 ++ notification/hipchat.py | 5 +++-- notification/nexmo.py | 1 + notification/sendgrid.py | 5 +---- notification/twilio.py | 5 +---- 11 files changed, 23 insertions(+), 19 deletions(-) diff --git a/monitoring/airbrake_deployment.py b/monitoring/airbrake_deployment.py index 3b54e55e751..a58df024182 100644 --- a/monitoring/airbrake_deployment.py +++ b/monitoring/airbrake_deployment.py @@ -61,8 +61,7 @@ options: default: 'yes' choices: ['yes', 'no'] -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] +requirements: [] ''' EXAMPLES = ''' @@ -72,6 +71,8 @@ EXAMPLES = ''' revision=4.2 ''' +import urllib + # =========================================== # Module execution. # diff --git a/monitoring/newrelic_deployment.py b/monitoring/newrelic_deployment.py index 832e467dea0..3d9bc6c0ec3 100644 --- a/monitoring/newrelic_deployment.py +++ b/monitoring/newrelic_deployment.py @@ -72,8 +72,7 @@ options: choices: ['yes', 'no'] version_added: 1.5.1 -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] +requirements: [] ''' EXAMPLES = ''' @@ -83,6 +82,8 @@ EXAMPLES = ''' revision=1.0 ''' +import urllib + # =========================================== # Module execution. # diff --git a/monitoring/rollbar_deployment.py b/monitoring/rollbar_deployment.py index 43e2aa00722..060193b78a5 100644 --- a/monitoring/rollbar_deployment.py +++ b/monitoring/rollbar_deployment.py @@ -76,6 +76,7 @@ EXAMPLES = ''' comment='Test Deploy' ''' +import urllib def main(): diff --git a/network/citrix/netscaler.py b/network/citrix/netscaler.py index 61bc35356e5..384a625bdca 100644 --- a/network/citrix/netscaler.py +++ b/network/citrix/netscaler.py @@ -81,7 +81,7 @@ options: default: 'yes' choices: ['yes', 'no'] -requirements: [ "urllib", "urllib2" ] +requirements: [] author: "Nandor Sivok (@dominis)" ''' @@ -99,7 +99,7 @@ ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=api import base64 import socket - +import urllib class netscaler(object): diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index fcc7232a0da..cce7bd10082 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -86,7 +86,7 @@ notes: - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP. 
- This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks. -requirements: [ urllib, urllib2, hashlib, hmac ] +requirements: [ hashlib, hmac ] author: "Brice Burgess (@briceburg)" ''' @@ -113,6 +113,8 @@ EXAMPLES = ''' # DNSMadeEasy module specific support methods. # +import urllib + IMPORT_ERROR = None try: import json diff --git a/notification/flowdock.py b/notification/flowdock.py index 7c42e58644d..34dad8db375 100644 --- a/notification/flowdock.py +++ b/notification/flowdock.py @@ -85,8 +85,7 @@ options: choices: ['yes', 'no'] version_added: 1.5.1 -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] +requirements: [ ] ''' EXAMPLES = ''' @@ -104,6 +103,8 @@ EXAMPLES = ''' tags=tag1,tag2,tag3 ''' +import urllib + # =========================================== # Module execution. # diff --git a/notification/grove.py b/notification/grove.py index 85601d1cc78..4e4a0b5b684 100644 --- a/notification/grove.py +++ b/notification/grove.py @@ -49,6 +49,8 @@ EXAMPLES = ''' message=deployed {{ target }} ''' +import urllib + BASE_URL = 'https://grove.io/api/notice/%s/' # ============================================================== diff --git a/notification/hipchat.py b/notification/hipchat.py index 2498c11848c..32689965cf9 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -62,8 +62,7 @@ options: version_added: 1.6.0 -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] +requirements: [ ] author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)" ''' @@ -75,6 +74,8 @@ EXAMPLES = ''' # HipChat module specific support methods. # +import urllib + DEFAULT_URI = "https://api.hipchat.com/v1" MSG_URI_V1 = "/rooms/message" diff --git a/notification/nexmo.py b/notification/nexmo.py index d0c3d05e65c..89a246c0d90 100644 --- a/notification/nexmo.py +++ b/notification/nexmo.py @@ -71,6 +71,7 @@ EXAMPLES = """ msg: "{{ inventory_hostname }} completed" """ +import urllib NEXMO_API = 'https://rest.nexmo.com/sms/json' diff --git a/notification/sendgrid.py b/notification/sendgrid.py index 78806687e0b..7a2ee3ad657 100644 --- a/notification/sendgrid.py +++ b/notification/sendgrid.py @@ -84,10 +84,7 @@ EXAMPLES = ''' # ======================================= # sendgrid module support methods # -try: - import urllib, urllib2 -except ImportError: - module.fail_json(msg="urllib and urllib2 are required") +import urllib, urllib2 import base64 diff --git a/notification/twilio.py b/notification/twilio.py index e9ec5bcf51e..a2dd77fb2c0 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -104,10 +104,7 @@ EXAMPLES = ''' # ======================================= # twilio module support methods # -try: - import urllib, urllib2 -except ImportError: - module.fail_json(msg="urllib and urllib2 are required") +import urllib, urllib2 import base64 From 1659af1541648765d955a48be9802703dacc052b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 12:05:50 -0400 Subject: [PATCH 692/720] made sensu_check 2.4 friendly --- monitoring/sensu_check.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/monitoring/sensu_check.py b/monitoring/sensu_check.py index b968304c34f..eb9d0b7bf04 100644 --- a/monitoring/sensu_check.py +++ b/monitoring/sensu_check.py @@ -183,8 +183,8 @@ def sensu_check(module, path, name, state='present', backup=False): import simplejson as json try: - with open(path) as stream: - config = json.load(stream) + 
stream = open(path, 'r') + config = json.load(stream.read()) except IOError as e: if e.errno is 2: # File not found, non-fatal if state == 'absent': @@ -196,6 +196,9 @@ def sensu_check(module, path, name, state='present', backup=False): except ValueError: msg = '{path} contains invalid JSON'.format(path=path) module.fail_json(msg=msg) + finally: + if stream: + stream.close() if 'checks' not in config: if state == 'absent': @@ -274,10 +277,13 @@ def sensu_check(module, path, name, state='present', backup=False): if backup: module.backup_local(path) try: - with open(path, 'w') as stream: - stream.write(json.dumps(config, indent=2) + '\n') + stream = open(path, 'w') + stream.write(json.dumps(config, indent=2) + '\n') except IOError as e: module.fail_json(msg=str(e)) + finally: + if stream: + stream.close() return changed, reasons From dd6e8f354aaeeeaccc1566ab14cfd368d6ec1f72 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 19 Jun 2015 09:07:04 -0700 Subject: [PATCH 693/720] Modify a few more modules to not conditionalize urllib(2) import. --- monitoring/librato_annotation.py | 7 +------ notification/sendgrid.py | 3 ++- notification/twilio.py | 3 ++- notification/typetalk.py | 16 +++++----------- 4 files changed, 10 insertions(+), 19 deletions(-) diff --git a/monitoring/librato_annotation.py b/monitoring/librato_annotation.py index 88d3bb81f7b..c606dfdc9a0 100644 --- a/monitoring/librato_annotation.py +++ b/monitoring/librato_annotation.py @@ -31,7 +31,6 @@ description: version_added: "1.6" author: "Seth Edwards (@sedward)" requirements: - - urllib2 - base64 options: user: @@ -107,11 +106,7 @@ EXAMPLES = ''' ''' -try: - import urllib2 - HAS_URLLIB2 = True -except ImportError: - HAS_URLLIB2 = False +import urllib2 def post_annotation(module): user = module.params['user'] diff --git a/notification/sendgrid.py b/notification/sendgrid.py index 7a2ee3ad657..e1ae7b7749f 100644 --- a/notification/sendgrid.py +++ b/notification/sendgrid.py @@ -84,7 +84,8 @@ EXAMPLES = ''' # ======================================= # sendgrid module support methods # -import urllib, urllib2 +import urllib +import urllib2 import base64 diff --git a/notification/twilio.py b/notification/twilio.py index a2dd77fb2c0..ee12d987e9e 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -104,7 +104,8 @@ EXAMPLES = ''' # ======================================= # twilio module support methods # -import urllib, urllib2 +import urllib +import urllib2 import base64 diff --git a/notification/typetalk.py b/notification/typetalk.py index 638f97ae530..002c8b5cc85 100644 --- a/notification/typetalk.py +++ b/notification/typetalk.py @@ -25,7 +25,7 @@ options: description: - message body required: true -requirements: [ urllib, urllib2, json ] +requirements: [ json ] author: "Takashi Someda (@tksmd)" ''' @@ -33,15 +33,9 @@ EXAMPLES = ''' - typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed" ''' -try: - import urllib -except ImportError: - urllib = None +import urllib -try: - import urllib2 -except ImportError: - urllib2 = None +import urllib2 try: import json @@ -96,8 +90,8 @@ def main(): supports_check_mode=False ) - if not (urllib and urllib2 and json): - module.fail_json(msg="urllib, urllib2 and json modules are required") + if not json: + module.fail_json(msg="json module is required") client_id = module.params["client_id"] client_secret = module.params["client_secret"] From eeb9d3481256b038e69638618f9d3a566e24b6c6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 12:10:14 
-0400 Subject: [PATCH 694/720] also fixed exceptions --- monitoring/sensu_check.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/sensu_check.py b/monitoring/sensu_check.py index eb9d0b7bf04..5c932a1d303 100644 --- a/monitoring/sensu_check.py +++ b/monitoring/sensu_check.py @@ -185,7 +185,7 @@ def sensu_check(module, path, name, state='present', backup=False): try: stream = open(path, 'r') config = json.load(stream.read()) - except IOError as e: + except IOError, e: if e.errno is 2: # File not found, non-fatal if state == 'absent': reasons.append('file did not exist and state is `absent\'') @@ -279,7 +279,7 @@ def sensu_check(module, path, name, state='present', backup=False): try: stream = open(path, 'w') stream.write(json.dumps(config, indent=2) + '\n') - except IOError as e: + except IOError, e: module.fail_json(msg=str(e)) finally: if stream: From 286bc3d9dc80e2bb3215de823ab5ed6c2a35342c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 12:13:43 -0400 Subject: [PATCH 695/720] forgot finally 2.4 syntax --- monitoring/sensu_check.py | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/monitoring/sensu_check.py b/monitoring/sensu_check.py index 5c932a1d303..a1bd36ca665 100644 --- a/monitoring/sensu_check.py +++ b/monitoring/sensu_check.py @@ -183,19 +183,20 @@ def sensu_check(module, path, name, state='present', backup=False): import simplejson as json try: - stream = open(path, 'r') - config = json.load(stream.read()) - except IOError, e: - if e.errno is 2: # File not found, non-fatal - if state == 'absent': - reasons.append('file did not exist and state is `absent\'') - return changed, reasons - config = {} - else: - module.fail_json(msg=str(e)) - except ValueError: - msg = '{path} contains invalid JSON'.format(path=path) - module.fail_json(msg=msg) + try: + stream = open(path, 'r') + config = json.load(stream.read()) + except IOError, e: + if e.errno is 2: # File not found, non-fatal + if state == 'absent': + reasons.append('file did not exist and state is `absent\'') + return changed, reasons + config = {} + else: + module.fail_json(msg=str(e)) + except ValueError: + msg = '{path} contains invalid JSON'.format(path=path) + module.fail_json(msg=msg) finally: if stream: stream.close() @@ -277,10 +278,11 @@ def sensu_check(module, path, name, state='present', backup=False): if backup: module.backup_local(path) try: - stream = open(path, 'w') - stream.write(json.dumps(config, indent=2) + '\n') - except IOError, e: - module.fail_json(msg=str(e)) + try: + stream = open(path, 'w') + stream.write(json.dumps(config, indent=2) + '\n') + except IOError, e: + module.fail_json(msg=str(e)) finally: if stream: stream.close() From 268104fca321a777e279ed20d252e43da23a2b9a Mon Sep 17 00:00:00 2001 From: Alan Loi Date: Sat, 20 Jun 2015 21:24:36 +1000 Subject: [PATCH 696/720] Added check_mode support to dynamodb_table module. --- cloud/amazon/dynamodb_table | 51 ++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/cloud/amazon/dynamodb_table b/cloud/amazon/dynamodb_table index 7a200a3b271..b59280a2e23 100644 --- a/cloud/amazon/dynamodb_table +++ b/cloud/amazon/dynamodb_table @@ -39,7 +39,7 @@ options: hash_key_name: description: - Name of the hash key. - - Required when state=present. + - Required when C(state=present). 
required: false hash_key_type: description: @@ -109,10 +109,10 @@ try: from boto.dynamodb2.fields import HashKey, RangeKey from boto.dynamodb2.types import STRING, NUMBER, BINARY from boto.exception import BotoServerError, JSONResponseError + HAS_BOTO = True except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) + HAS_BOTO = False DYNAMO_TYPE_MAP = { @@ -132,8 +132,8 @@ def create_or_update_dynamo_table(connection, module): write_capacity = module.params.get('write_capacity') schema = [ - HashKey(hash_key_name, map_dynamo_type(hash_key_type)), - RangeKey(range_key_name, map_dynamo_type(range_key_type)) + HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type)), + RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type)) ] throughput = { 'read': read_capacity, @@ -155,13 +155,14 @@ def create_or_update_dynamo_table(connection, module): table = Table(table_name, connection=connection) if dynamo_table_exists(table): - changed = update_dynamo_table(table, throughput=throughput) + result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode) else: - Table.create(table_name, connection=connection, schema=schema, throughput=throughput) - changed = True + if not module.check_mode: + Table.create(table_name, connection=connection, schema=schema, throughput=throughput) + result['changed'] = True - result['table_status'] = table.describe()['Table']['TableStatus'] - result['changed'] = changed + if not module.check_mode: + result['table_status'] = table.describe()['Table']['TableStatus'] except BotoServerError: result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc() @@ -171,7 +172,7 @@ def create_or_update_dynamo_table(connection, module): def delete_dynamo_table(connection, module): - table_name = module.params.get('table_name') + table_name = module.params.get('name') result = dict( region=module.params.get('region'), @@ -179,14 +180,15 @@ def delete_dynamo_table(connection, module): ) try: - changed = False table = Table(table_name, connection=connection) if dynamo_table_exists(table): - table.delete() - changed = True + if not module.check_mode: + table.delete() + result['changed'] = True - result['changed'] = changed + else: + result['changed'] = False except BotoServerError: result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc() @@ -207,12 +209,14 @@ def dynamo_table_exists(table): raise e -def update_dynamo_table(table, throughput=None): +def update_dynamo_table(table, throughput=None, check_mode=False): table.describe() # populate table details - # AWS complains if the throughput hasn't changed if has_throughput_changed(table, throughput): - return table.update(throughput=throughput) + if not check_mode: + return table.update(throughput=throughput) + else: + return True return False @@ -225,10 +229,6 @@ def has_throughput_changed(table, new_throughput): new_throughput['write'] != table.throughput['write'] -def map_dynamo_type(dynamo_type): - return DYNAMO_TYPE_MAP.get(dynamo_type) - - def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( @@ -242,7 +242,12 @@ def main(): write_capacity=dict(default=1, type='int'), )) - module = AnsibleModule(argument_spec=argument_spec) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module) connection = 
boto.dynamodb2.connect_to_region(region) From 011fef5f3275b5a1cf55a9c578c61d2dde0d3f99 Mon Sep 17 00:00:00 2001 From: Alan Loi Date: Sat, 20 Jun 2015 21:34:27 +1000 Subject: [PATCH 697/720] Added return value documentation to dynamodb_table module. --- cloud/amazon/dynamodb_table | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cloud/amazon/dynamodb_table b/cloud/amazon/dynamodb_table index b59280a2e23..89a7e0fbb2e 100644 --- a/cloud/amazon/dynamodb_table +++ b/cloud/amazon/dynamodb_table @@ -102,6 +102,14 @@ EXAMPLES = ''' state: absent ''' +RETURN = ''' +table_status: + description: The current status of the table. + returned: success + type: string + sample: ACTIVE +''' + try: import boto import boto.dynamodb2 From ac09e609146c3f8c8ef46dc22ab75834aa5d20dc Mon Sep 17 00:00:00 2001 From: Alan Loi Date: Sun, 21 Jun 2015 08:40:57 +1000 Subject: [PATCH 698/720] Add .py file extension to dynamodb_table module. --- cloud/amazon/{dynamodb_table => dynamodb_table.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename cloud/amazon/{dynamodb_table => dynamodb_table.py} (100%) diff --git a/cloud/amazon/dynamodb_table b/cloud/amazon/dynamodb_table.py similarity index 100% rename from cloud/amazon/dynamodb_table rename to cloud/amazon/dynamodb_table.py From 75e1e9fcda109b223487de752356066035059ae7 Mon Sep 17 00:00:00 2001 From: Eike Frost Date: Tue, 28 Apr 2015 19:41:54 +0200 Subject: [PATCH 699/720] add zabbix proxy support to zabbix_host --- monitoring/zabbix_host.py | 49 ++++++++++++++++++++++++++++++--------- 1 file changed, 38 insertions(+), 11 deletions(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 772e92cb32d..6fac82c7177 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -79,6 +79,10 @@ options: description: - The timeout of API request (seconds). default: 10 + proxy: + description: + - The name of the Zabbix Proxy to be used + default: None interfaces: description: - List of interfaces to be created for the host (see example below). 
@@ -118,6 +122,7 @@ EXAMPLES = ''' ip: 10.xx.xx.xx dns: "" port: 12345 + proxy: a.zabbix.proxy ''' import logging @@ -174,21 +179,25 @@ class Host(object): template_ids.append(template_id) return template_ids - def add_host(self, host_name, group_ids, status, interfaces): + def add_host(self, host_name, group_ids, status, interfaces, proxy_id): try: if self._module.check_mode: self._module.exit_json(changed=True) - host_list = self._zapi.host.create({'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}) + parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status} + if proxy_id: + parameters['proxy_hostid'] = proxy_id + host_list = self._zapi.host.create(parameters) if len(host_list) >= 1: return host_list['hostids'][0] except Exception, e: self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e)) - def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list): + def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id): try: if self._module.check_mode: self._module.exit_json(changed=True) - self._zapi.host.update({'hostid': host_id, 'groups': group_ids, 'status': status}) + parameters = {'hostid': host_id, 'groups': group_ids, 'status': status, 'proxy_hostid': proxy_id} + self._zapi.host.update(parameters) interface_list_copy = exist_interface_list if interfaces: for interface in interfaces: @@ -234,6 +243,14 @@ class Host(object): else: return host_list[0] + # get proxyid by proxy name + def get_proxyid_by_proxy_name(self, proxy_name): + proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}}) + if len(proxy_list) < 1: + self._module.fail_json(msg="Proxy not found: %s" % proxy_name) + else: + return proxy_list[0]['proxyid'] + # get group ids by group names def get_group_ids_by_group_names(self, group_names): group_ids = [] @@ -294,7 +311,7 @@ class Host(object): # check all the properties before link or clear template def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids, - exist_interfaces, host): + exist_interfaces, host, proxy_id): # get the existing host's groups exist_host_groups = self.get_host_groups_by_host_id(host_id) if set(host_groups) != set(exist_host_groups): @@ -314,6 +331,9 @@ class Host(object): if set(list(template_ids)) != set(exist_template_ids): return True + if host['proxy_hostid'] != proxy_id: + return True + return False # link or clear template of the host @@ -349,7 +369,8 @@ def main(): status=dict(default="enabled", choices=['enabled', 'disabled']), state=dict(default="present", choices=['present', 'absent']), timeout=dict(type='int', default=10), - interfaces=dict(required=False) + interfaces=dict(required=False), + proxy=dict(required=False) ), supports_check_mode=True ) @@ -367,6 +388,7 @@ def main(): state = module.params['state'] timeout = module.params['timeout'] interfaces = module.params['interfaces'] + proxy = module.params['proxy'] # convert enabled to 0; disabled to 1 status = 1 if status == "disabled" else 0 @@ -396,6 +418,11 @@ def main(): if interface['type'] == 1: ip = interface['ip'] + proxy_id = "0" + + if proxy: + proxy_id = host.get_proxyid_by_proxy_name(proxy) + # check if host exist is_host_exist = host.is_host_exist(host_name) @@ -421,10 +448,10 @@ def main(): if len(exist_interfaces) > interfaces_len: if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids, - exist_interfaces, 
zabbix_host_obj): + exist_interfaces, zabbix_host_obj, proxy_id): host.link_or_clear_template(host_id, template_ids) host.update_host(host_name, group_ids, status, host_id, - interfaces, exist_interfaces) + interfaces, exist_interfaces, proxy_id) module.exit_json(changed=True, result="Successfully update host %s (%s) and linked with template '%s'" % (host_name, ip, link_templates)) @@ -432,8 +459,8 @@ def main(): module.exit_json(changed=False) else: if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids, - exist_interfaces_copy, zabbix_host_obj): - host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces) + exist_interfaces_copy, zabbix_host_obj, proxy_id): + host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id) host.link_or_clear_template(host_id, template_ids) module.exit_json(changed=True, result="Successfully update host %s (%s) and linked with template '%s'" @@ -448,7 +475,7 @@ def main(): module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name) # create host - host_id = host.add_host(host_name, group_ids, status, interfaces) + host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id) host.link_or_clear_template(host_id, template_ids) module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % ( host_name, ip, link_templates)) From 1a914128f6d172da7ea349d6b070758e1ebbff9c Mon Sep 17 00:00:00 2001 From: Alan Loi Date: Mon, 22 Jun 2015 20:23:11 +1000 Subject: [PATCH 700/720] Fix aws connection to use params. --- cloud/amazon/dynamodb_table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/dynamodb_table.py b/cloud/amazon/dynamodb_table.py index 89a7e0fbb2e..130fae44721 100644 --- a/cloud/amazon/dynamodb_table.py +++ b/cloud/amazon/dynamodb_table.py @@ -258,7 +258,7 @@ def main(): module.fail_json(msg='boto required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module) - connection = boto.dynamodb2.connect_to_region(region) + connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params) state = module.params.get('state') if state == 'present': From 0ad12cdcf4e5d4ed90b506917ee5083b1910b0e2 Mon Sep 17 00:00:00 2001 From: Gerrit Germis Date: Mon, 22 Jun 2015 20:09:54 +0200 Subject: [PATCH 701/720] specify int parameter types for wait_interval and wait_retries --- network/haproxy.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/network/haproxy.py b/network/haproxy.py index 690aa60bbba..cd17d057b5f 100644 --- a/network/haproxy.py +++ b/network/haproxy.py @@ -78,13 +78,13 @@ options: description: - number of times to check for status after changing the state required: false - default: 20 + default: 25 version_added: "2.0" wait_interval: description: - number of seconds to wait between retries required: false - default: 1 + default: 5 version_added: "2.0" ''' @@ -129,7 +129,7 @@ import time DEFAULT_SOCKET_LOCATION="/var/run/haproxy.sock" RECV_SIZE = 1024 ACTION_CHOICES = ['enabled', 'disabled'] -WAIT_RETRIES=20 +WAIT_RETRIES=25 WAIT_INTERVAL=1 ###################################################################### @@ -302,9 +302,9 @@ def main(): weight=dict(required=False, default=None), socket = dict(required=False, default=DEFAULT_SOCKET_LOCATION), shutdown_sessions=dict(required=False, default=False), - wait=dict(required=False, default=False), - wait_retries=dict(required=False, 
default=WAIT_RETRIES), - wait_interval=dict(required=False, default=WAIT_INTERVAL), + wait=dict(required=False, default=False, type='bool'), + wait_retries=dict(required=False, default=WAIT_RETRIES, type='int'), + wait_interval=dict(required=False, default=WAIT_INTERVAL, type='int'), ), ) From 2612da50ad637bb469431df699c82b5f68d255e6 Mon Sep 17 00:00:00 2001 From: Gerrit Germis Date: Mon, 22 Jun 2015 20:13:12 +0200 Subject: [PATCH 702/720] wait_interval default value did not match the documented value --- network/haproxy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/haproxy.py b/network/haproxy.py index cd17d057b5f..6d4f6a4279a 100644 --- a/network/haproxy.py +++ b/network/haproxy.py @@ -130,7 +130,7 @@ DEFAULT_SOCKET_LOCATION="/var/run/haproxy.sock" RECV_SIZE = 1024 ACTION_CHOICES = ['enabled', 'disabled'] WAIT_RETRIES=25 -WAIT_INTERVAL=1 +WAIT_INTERVAL=5 ###################################################################### class TimeoutException(Exception): From d8063b913ee49f03236c30a3d90b6e106c949f3f Mon Sep 17 00:00:00 2001 From: jpic Date: Tue, 23 Jun 2015 19:36:43 +0200 Subject: [PATCH 703/720] Define HAS_LXC even if import lxc doesn't fail. This fixes:: Traceback (most recent call last): File "/home/jpic/.ansible/tmp/ansible-tmp-1435080800.61-38257321141340/lxc_container", line 3353, in main() File "/home/jpic/.ansible/tmp/ansible-tmp-1435080800.61-38257321141340/lxc_container", line 1712, in main if not HAS_LXC: NameError: global name 'HAS_LXC' is not defined --- cloud/lxc/lxc_container.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index e6d70f4e487..2264a86c40c 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -385,6 +385,8 @@ try: import lxc except ImportError: HAS_LXC = False +else: + HAS_LXC = True # LXC_COMPRESSION_MAP is a map of available compression types when creating From c4d24721483af1e347b7408c8d19cf1617a6a91f Mon Sep 17 00:00:00 2001 From: jpic Date: Tue, 23 Jun 2015 19:38:51 +0200 Subject: [PATCH 704/720] Fixed lxc option parsing. 
This fixes:: Traceback (most recent call last): File "/home/jpic/.ansible/tmp/ansible-tmp-1435080916.98-133068627776311/lxc_container", line 3355, in main() File "/home/jpic/.ansible/tmp/ansible-tmp-1435080916.98-133068627776311/lxc_container", line 1724, in main lxc_manage.run() File "/home/jpic/.ansible/tmp/ansible-tmp-1435080916.98-133068627776311/lxc_container", line 1605, in run action() File "/home/jpic/.ansible/tmp/ansible-tmp-1435080916.98-133068627776311/lxc_container", line 1145, in _started self._config() File "/home/jpic/.ansible/tmp/ansible-tmp-1435080916.98-133068627776311/lxc_container", line 714, in _config _, _value = option_line.split('=') ValueError: too many values to unpack With such a task:: tasks: - lxc_container: name: buildbot-master container_config: - "lxc.mount.entry = {{ cwd }} srv/peopletest none defaults,bind,uid=0,create=dir 0 0" --- cloud/lxc/lxc_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index e6d70f4e487..090d4f73c97 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -708,7 +708,7 @@ class LxcContainerManagement(object): for option_line in container_config: # Look for key in config if option_line.startswith(key): - _, _value = option_line.split('=') + _, _value = option_line.split('=', 1) config_value = ' '.join(_value.split()) line_index = container_config.index(option_line) # If the sanitized values don't match replace them From ebe1904e59aaa9a459c3993bce6a499dc5bd9b73 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 23 Jun 2015 14:12:07 -0500 Subject: [PATCH 705/720] Add missing __init__.py --- cloud/rackspace/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 cloud/rackspace/__init__.py diff --git a/cloud/rackspace/__init__.py b/cloud/rackspace/__init__.py new file mode 100644 index 00000000000..e69de29bb2d From d5d84288ae0abba26cb8f66ae0ef9f2db07f306c Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 23 Jun 2015 14:12:17 -0500 Subject: [PATCH 706/720] Bump version_added to 2.0 --- cloud/rackspace/rax_mon_alarm.py | 2 +- cloud/rackspace/rax_mon_check.py | 2 +- cloud/rackspace/rax_mon_entity.py | 2 +- cloud/rackspace/rax_mon_notification.py | 2 +- cloud/rackspace/rax_mon_notification_plan.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py index f9b97bc8dd1..a3f29e22f50 100644 --- a/cloud/rackspace/rax_mon_alarm.py +++ b/cloud/rackspace/rax_mon_alarm.py @@ -27,7 +27,7 @@ description: notifications. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> *rax_mon_alarm* -version_added: "1.9" +version_added: "2.0" options: state: description: diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py index 101efd3c858..14b86864e2f 100644 --- a/cloud/rackspace/rax_mon_check.py +++ b/cloud/rackspace/rax_mon_check.py @@ -28,7 +28,7 @@ description: monitor. 
Rackspace monitoring module flow | rax_mon_entity -> *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm -version_added: "1.9" +version_added: "2.0" options: state: description: diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py index 5f82ff9c524..f5f142d2165 100644 --- a/cloud/rackspace/rax_mon_entity.py +++ b/cloud/rackspace/rax_mon_entity.py @@ -26,7 +26,7 @@ description: provide a convenient, centralized place to store IP addresses. Rackspace monitoring module flow | *rax_mon_entity* -> rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm -version_added: "1.9" +version_added: "2.0" options: label: description: diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py index 8a21b088c5e..d7b6692dc2c 100644 --- a/cloud/rackspace/rax_mon_notification.py +++ b/cloud/rackspace/rax_mon_notification.py @@ -25,7 +25,7 @@ description: channel that can be used to communicate alarms, such as email, webhooks, or PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm -version_added: "1.9" +version_added: "2.0" options: state: description: diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py index 05b89b2cfb3..5bb3fa1652a 100644 --- a/cloud/rackspace/rax_mon_notification_plan.py +++ b/cloud/rackspace/rax_mon_notification_plan.py @@ -26,7 +26,7 @@ description: associating existing rax_mon_notifications with severity levels. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm -version_added: "1.9" +version_added: "2.0" options: state: description: From f1e3260b3f97e37ae70788b42f089dd53f591b99 Mon Sep 17 00:00:00 2001 From: Arnaud Dematte Date: Tue, 21 Apr 2015 14:48:44 +0200 Subject: [PATCH 707/720] Update mail.py to allow html content Adding parameter subtype to allow html based content. The default behavior of text/plain has been preserved. --- notification/mail.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/notification/mail.py b/notification/mail.py index c42e80fdabf..52869460862 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -110,6 +110,11 @@ options: - The character set of email being sent default: 'us-ascii' required: false + subtype: + description: + - The minor mime type, can be either text or html. The major type is always text. 
+ default: 'plain' + required: false """ EXAMPLES = ''' @@ -183,7 +188,8 @@ def main(): body = dict(default=None), attach = dict(default=None), headers = dict(default=None), - charset = dict(default='us-ascii') + charset = dict(default='us-ascii'), + subtype = dict(default='plain') ) ) @@ -200,6 +206,7 @@ def main(): attach_files = module.params.get('attach') headers = module.params.get('headers') charset = module.params.get('charset') + subtype = module.params.get('subtype') sender_phrase, sender_addr = parseaddr(sender) if not body: @@ -259,7 +266,7 @@ def main(): if len(cc_list) > 0: msg['Cc'] = ", ".join(cc_list) - part = MIMEText(body + "\n\n", _charset=charset) + part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset) msg.attach(part) if attach_files is not None: From 955bf92ff892a7359a045e1ddb3b29b7809a230b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 06:53:28 -0700 Subject: [PATCH 708/720] Add version_added to the subtype parameter --- notification/mail.py | 1 + 1 file changed, 1 insertion(+) diff --git a/notification/mail.py b/notification/mail.py index 52869460862..8be9a589cbf 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -115,6 +115,7 @@ options: - The minor mime type, can be either text or html. The major type is always text. default: 'plain' required: false + version_added: "2.0" """ EXAMPLES = ''' From 9183170a4a0e8d1ccfdf8c3535ad3b28ca25b22c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 07:05:29 -0700 Subject: [PATCH 709/720] These modules were added to version 2.0, not 1.9 --- windows/win_iis_virtualdirectory.py | 2 +- windows/win_iis_webapplication.py | 2 +- windows/win_iis_webapppool.py | 2 +- windows/win_iis_webbinding.py | 2 +- windows/win_iis_website.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/windows/win_iis_virtualdirectory.py b/windows/win_iis_virtualdirectory.py index bbedfbbb4ab..c8a5dd1dcc8 100644 --- a/windows/win_iis_virtualdirectory.py +++ b/windows/win_iis_virtualdirectory.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: win_iis_virtualdirectory -version_added: "1.9" +version_added: "2.0" short_description: Configures a IIS virtual directories. description: - Creates, Removes and configures a IIS Web site diff --git a/windows/win_iis_webapplication.py b/windows/win_iis_webapplication.py index d8a59b66054..11a338e71e0 100644 --- a/windows/win_iis_webapplication.py +++ b/windows/win_iis_webapplication.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: win_iis_website -version_added: "1.9" +version_added: "2.0" short_description: Configures a IIS Web application. description: - Creates, Removes and configures a IIS Web applications diff --git a/windows/win_iis_webapppool.py b/windows/win_iis_webapppool.py index 320fe07f637..c77c3b04cb7 100644 --- a/windows/win_iis_webapppool.py +++ b/windows/win_iis_webapppool.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: win_iis_webapppool -version_added: "1.9" +version_added: "2.0" short_description: Configures a IIS Web Application Pool. description: - Creates, Removes and configures a IIS Web Application Pool diff --git a/windows/win_iis_webbinding.py b/windows/win_iis_webbinding.py index 0cc5da158bf..061bed73723 100644 --- a/windows/win_iis_webbinding.py +++ b/windows/win_iis_webbinding.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: win_iis_webbinding -version_added: "1.9" +version_added: "2.0" short_description: Configures a IIS Web site. 
description: - Creates, Removes and configures a binding to an existing IIS Web site diff --git a/windows/win_iis_website.py b/windows/win_iis_website.py index 0893b11c2bd..8921afe5970 100644 --- a/windows/win_iis_website.py +++ b/windows/win_iis_website.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: win_iis_website -version_added: "1.9" +version_added: "2.0" short_description: Configures a IIS Web site. description: - Creates, Removes and configures a IIS Web site From dec7d95d514ca89c2784b63d836dd6fb872bdd9c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 07:12:10 -0700 Subject: [PATCH 710/720] Fix up docs --- cloud/amazon/dynamodb_table.py | 1 + windows/win_iis_virtualdirectory.py | 4 ++-- windows/win_iis_webapplication.py | 14 +++++++------- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/cloud/amazon/dynamodb_table.py b/cloud/amazon/dynamodb_table.py index 130fae44721..94d1f4616bb 100644 --- a/cloud/amazon/dynamodb_table.py +++ b/cloud/amazon/dynamodb_table.py @@ -23,6 +23,7 @@ description: - Can update the provisioned throughput on existing tables. - Returns the status of the specified table. author: Alan Loi (@loia) +version_added: "2.0" requirements: - "boto >= 2.13.2" options: diff --git a/windows/win_iis_virtualdirectory.py b/windows/win_iis_virtualdirectory.py index c8a5dd1dcc8..e5bbd950007 100644 --- a/windows/win_iis_virtualdirectory.py +++ b/windows/win_iis_virtualdirectory.py @@ -28,13 +28,13 @@ description: options: name: description: - - The name of the virtual directory to create. + - The name of the virtual directory to create or remove required: true default: null aliases: [] state: description: - - + - Whether to add or remove the specified virtual directory choices: - absent - present diff --git a/windows/win_iis_webapplication.py b/windows/win_iis_webapplication.py index 11a338e71e0..b8ebd085162 100644 --- a/windows/win_iis_webapplication.py +++ b/windows/win_iis_webapplication.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -module: win_iis_website +module: win_iis_webapplication version_added: "2.0" short_description: Configures a IIS Web application. description: @@ -32,12 +32,12 @@ options: required: true default: null aliases: [] - site: - description: - - Name of the site on which the application is created. - required: true - default: null - aliases: [] + site: + description: + - Name of the site on which the application is created. 
+ required: true + default: null + aliases: [] state: description: - State of the web application From 60b5ae35b30d4c2a2b2d337ac413864d6df8251a Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 26 Jun 2015 14:23:35 +0200 Subject: [PATCH 711/720] cloudstack: make get_template_or_iso returning a dict for fix GH-646 --- cloud/cloudstack/cs_instance.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index a93a524383a..7cf4426267e 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -355,6 +355,8 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): def __init__(self, module): AnsibleCloudStack.__init__(self, module) self.instance = None + self.template = None + self.iso = None def get_service_offering_id(self): @@ -371,7 +373,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): self.module.fail_json(msg="Service offering '%s' not found" % service_offering) - def get_template_or_iso_id(self): + def get_template_or_iso(self, key=None): template = self.module.params.get('template') iso = self.module.params.get('iso') @@ -388,21 +390,28 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args['zoneid'] = self.get_zone('id') if template: + if self.template: + return self._get_by_key(key, self.template) + args['templatefilter'] = 'executable' templates = self.cs.listTemplates(**args) if templates: for t in templates['template']: if template in [ t['displaytext'], t['name'], t['id'] ]: - return t['id'] + self.template = t + return self._get_by_key(key, self.template) self.module.fail_json(msg="Template '%s' not found" % template) elif iso: + if self.iso: + return self._get_by_key(key, self.iso) args['isofilter'] = 'executable' isos = self.cs.listIsos(**args) if isos: for i in isos['iso']: if iso in [ i['displaytext'], i['name'], i['id'] ]: - return i['id'] + self.iso = i + return self._get_by_key(key, self.iso) self.module.fail_json(msg="ISO '%s' not found" % iso) @@ -503,7 +512,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): self.result['changed'] = True args = {} - args['templateid'] = self.get_template_or_iso_id() + args['templateid'] = self.get_template_or_iso(key='id') args['zoneid'] = self.get_zone('id') args['serviceofferingid'] = self.get_service_offering_id() args['account'] = self.get_account('name') From b1e6d6ba52c7aaa5f2ab1c73e642d774ad88986c Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 26 Jun 2015 14:52:31 +0200 Subject: [PATCH 712/720] cloudstack: fix cs_instance hypervisor must be omitted if set on template/iso Fix related to issue reported in PR GH-646 --- cloud/cloudstack/cs_instance.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 7cf4426267e..0d156390e83 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -70,8 +70,8 @@ options: hypervisor: description: - Name the hypervisor to be used for creating the new instance. - - Relevant when using C(state=present) and option C(ISO) is used. - - If not set, first found hypervisor will be used. + - Relevant when using C(state=present), but only considered if not set on ISO/template. + - If not set or found on ISO/template, first found hypervisor will be used. 
required: false default: null choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] @@ -520,7 +520,6 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args['projectid'] = self.get_project('id') args['diskofferingid'] = self.get_disk_offering_id() args['networkids'] = self.get_network_ids() - args['hypervisor'] = self.get_hypervisor() args['userdata'] = self.get_user_data() args['keyboard'] = self.module.params.get('keyboard') args['ipaddress'] = self.module.params.get('ip_address') @@ -532,6 +531,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args['securitygroupnames'] = ','.join(self.module.params.get('security_groups')) args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups')) + template_iso = self.get_template_or_iso() + if 'hypervisor' not in template_iso: + args['hypervisor'] = self.get_hypervisor() + instance = None if not self.module.check_mode: instance = self.cs.deployVirtualMachine(**args) From 9a1918c62875fde93267213631fc8852a704f31e Mon Sep 17 00:00:00 2001 From: Tim Hoiberg Date: Wed, 13 May 2015 19:40:50 +1000 Subject: [PATCH 713/720] Adding module to manage Ruby Gem dependencies via Bundler --- packaging/language/bundler.py | 199 ++++++++++++++++++++++++++++++++++ 1 file changed, 199 insertions(+) create mode 100644 packaging/language/bundler.py diff --git a/packaging/language/bundler.py b/packaging/language/bundler.py new file mode 100644 index 00000000000..877d09dbea5 --- /dev/null +++ b/packaging/language/bundler.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Tim Hoiberg +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION=''' +--- +module: bundler +short_description: Manage Ruby Gem dependencies with Bundler +description: + - Manage installation and Gem version dependencies for Ruby using the Bundler gem +version_added: "2.0.0" +options: + executable: + description: + - The path to the bundler executable + required: false + default: null + state: + description: + - The desired state of the Gem bundle. C(latest) updates gems to the most recent, acceptable version + required: false + choices: [present, latest] + default: present + chdir: + description: + - The directory to execute the bundler commands from. This directoy needs to contain a valid Gemfile or .bundle/ directory + required: false + default: temporary working directory + exclude_groups: + description: + - A list of Gemfile groups to exclude during operations. This only applies when state is C(present). Bundler considers this a 'remembered' + property for the Gemfile and will automatically exclude groups in future operations even if C(exclude_groups) is not set + required: false + default: null + clean: + description: + - Only applies if state is C(present). 
If set removes any gems on the target host that are not in the gemfile + required: false + choices: [yes, no] + default: "no" + gemfile: + description: + - Only applies if state is C(present). The path to the gemfile to use to install gems. + required: false + default: Gemfile in current directory + local: + description: + - If set only installs gems from the cache on the target host + required: false + choices: [yes, no] + default: "no" + deployment_mode: + description: + - Only applies if state is C(present). If set it will only install gems that are in the default or production groups. Requires a Gemfile.lock + file to have been created prior + required: false + choices: [yes, no] + default: "no" + user_install: + description: + - Only applies if state is C(present). Installs gems in the local user's cache or for all users + required: false + choices: [yes, no] + default: "yes" + gem_path: + description: + - Only applies if state is C(present). Specifies the directory to install the gems into. If C(chdir) is set then this path is relative to C(chdir) + required: false + default: RubyGems gem paths + binstub_directory: + description: + - Only applies if state is C(present). Specifies the directory to install any gem bins files to. When executed the bin files will run within + the context of the Gemfile and fail if any required gem dependencies are not installed. If C(chdir) is set then this path is relative to C(chdir) + required: false + default: null + extra_args: + description: + - A space separated string of additional commands that can be applied to the Bundler command. Refer to the Bundler documentation for more + information + required: false + default: null +author: Tim Hoiberg +''' + +EXAMPLES=''' +# Installs gems from a Gemfile in the current directory +- bundler: state=present executable=~/.rvm/gems/2.1.5/bin/bundle + +# Excludes the production group from installing +- bundler: state=present exclude_groups=production + +# Only install gems from the default and production groups +- bundler: state=present deployment=yes + +# Installs gems using a Gemfile in another directory +- bunlder: state=present gemfile=../rails_project/Gemfile + +# Updates Gemfile in another directory +- bundler: state=latest chdir=~/rails_project +''' + + +def get_bundler_executable(module): + if module.params.get('executable'): + return module.params.get('executable').split(' ') + else: + return [ module.get_bin_path('bundle', True) ] + + +def main(): + module = AnsibleModule( + argument_spec=dict( + executable=dict(default=None, required=False), + state=dict(default='present', required=False, choices=['present', 'latest']), + chdir=dict(default=None, required=False), + exclude_groups=dict(default=None, required=False, type='list'), + clean=dict(default=False, required=False, type='bool'), + gemfile=dict(default=None, required=False), + local=dict(default=False, required=False, type='bool'), + deployment_mode=dict(default=False, required=False, type='bool'), + user_install=dict(default=True, required=False, type='bool'), + gem_path=dict(default=None, required=False), + binstub_directory=dict(default=None, required=False), + extra_args=dict(default=None, required=False), + ), + supports_check_mode=True + ) + + executable = module.params.get('executable') + state = module.params.get('state') + chdir = module.params.get('chdir') + exclude_groups = module.params.get('exclude_groups') + clean = module.params.get('clean') + gemfile = module.params.get('gemfile') + local = module.params.get('local') + 
+    deployment_mode = module.params.get('deployment_mode')
+    user_install = module.params.get('user_install')
+    gem_path = module.params.get('gem_path')
+    binstub_directory = module.params.get('binstub_directory')
+    extra_args = module.params.get('extra_args')
+
+    cmd = get_bundler_executable(module)
+
+    if module.check_mode:
+        cmd.append('check')
+        rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)
+
+        module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)
+
+    if state == 'present':
+        cmd.append('install')
+        if exclude_groups:
+            cmd.extend(['--without', ':'.join(exclude_groups)])
+        if clean:
+            cmd.append('--clean')
+        if gemfile:
+            cmd.extend(['--gemfile', gemfile])
+        if local:
+            cmd.append('--local')
+        if deployment_mode:
+            cmd.append('--deployment')
+        if not user_install:
+            cmd.append('--system')
+        if gem_path:
+            cmd.extend(['--path', gem_path])
+        if binstub_directory:
+            cmd.extend(['--binstubs', binstub_directory])
+    else:
+        cmd.append('update')
+        if local:
+            cmd.append('--local')
+
+    if extra_args:
+        cmd.extend(extra_args.split(' '))
+
+    rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
+
+    module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
+
+
+from ansible.module_utils.basic import *
+main()
\ No newline at end of file

From 1d48c9658a6c539f6a82f6b857342cc20a321597 Mon Sep 17 00:00:00 2001
From: Tim Hoiberg
Date: Sat, 27 Jun 2015 15:50:30 +1000
Subject: [PATCH 714/720] Fixing typo

---
 packaging/language/bundler.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packaging/language/bundler.py b/packaging/language/bundler.py
index 877d09dbea5..82ef2838a9a 100644
--- a/packaging/language/bundler.py
+++ b/packaging/language/bundler.py
@@ -110,7 +110,7 @@ EXAMPLES='''
 - bundler: state=present deployment=yes
 
 # Installs gems using a Gemfile in another directory
-- bunlder: state=present gemfile=../rails_project/Gemfile
+- bundler: state=present gemfile=../rails_project/Gemfile
 
 # Updates Gemfile in another directory
 - bundler: state=latest chdir=~/rails_project

From c7d554677736566d8aace3632e84c04ba744bbd9 Mon Sep 17 00:00:00 2001
From: Patrik Lundin
Date: Mon, 29 Jun 2015 09:27:44 +0200
Subject: [PATCH 715/720] openbsd_pkg: Update author mail address.

---
 packaging/os/openbsd_pkg.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packaging/os/openbsd_pkg.py b/packaging/os/openbsd_pkg.py
index 1b5d0bb06b2..1f331261d98 100644
--- a/packaging/os/openbsd_pkg.py
+++ b/packaging/os/openbsd_pkg.py
@@ -1,7 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 
-# (c) 2013, Patrik Lundin
+# (c) 2013, Patrik Lundin
 #
 # This file is part of Ansible
 #

From 9e8802cacd26617efbab32f26505158a6e2d64fc Mon Sep 17 00:00:00 2001
From: Alan Loi
Date: Mon, 29 Jun 2015 20:45:53 +1000
Subject: [PATCH 716/720] Docfixes - add version_added and default values.

---
 cloud/amazon/dynamodb_table.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/cloud/amazon/dynamodb_table.py b/cloud/amazon/dynamodb_table.py
index 130fae44721..f3ba7d7e77c 100644
--- a/cloud/amazon/dynamodb_table.py
+++ b/cloud/amazon/dynamodb_table.py
@@ -22,6 +22,7 @@ description:
   - Create or delete AWS Dynamo DB tables.
   - Can update the provisioned throughput on existing tables.
   - Returns the status of the specified table.
+version_added: "2.0"
 author: Alan Loi (@loia)
 requirements:
   - "boto >= 2.13.2"
@@ -41,6 +42,7 @@ options:
       - Name of the hash key.
       - Required when C(state=present).
     required: false
+    default: null
   hash_key_type:
     description:
       - Type of the hash key.
@@ -51,6 +53,7 @@ options:
     description:
       - Name of the range key.
     required: false
+    default: null
   range_key_type:
     description:
       - Type of the range key.

From c7f0fafe62c4cb08762dbffa2dbe01921123549b Mon Sep 17 00:00:00 2001
From: Alan Loi
Date: Mon, 29 Jun 2015 20:55:33 +1000
Subject: [PATCH 717/720] Check AWS region and credentials are provided.

---
 cloud/amazon/dynamodb_table.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/cloud/amazon/dynamodb_table.py b/cloud/amazon/dynamodb_table.py
index f3ba7d7e77c..4b29cfbfaa9 100644
--- a/cloud/amazon/dynamodb_table.py
+++ b/cloud/amazon/dynamodb_table.py
@@ -119,7 +119,7 @@ try:
     from boto.dynamodb2.table import Table
     from boto.dynamodb2.fields import HashKey, RangeKey
     from boto.dynamodb2.types import STRING, NUMBER, BINARY
-    from boto.exception import BotoServerError, JSONResponseError
+    from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError
     HAS_BOTO = True
 
 except ImportError:
@@ -261,7 +261,14 @@ def main():
         module.fail_json(msg='boto required for this module')
 
     region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-    connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
+    if not region:
+        module.fail_json(msg='region must be specified')
+
+    try:
+        connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
+
+    except (NoAuthHandlerFound, StandardError), e:
+        module.fail_json(msg=str(e))
 
     state = module.params.get('state')
     if state == 'present':
@@ -274,4 +281,5 @@ def main():
 
 from ansible.module_utils.basic import *
 from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+    main()

From 86fda85ba38f45d9f274fb0af73d3f291a6e5be3 Mon Sep 17 00:00:00 2001
From: Timothy Vandenbrande
Date: Mon, 29 Jun 2015 14:18:09 +0200
Subject: [PATCH 718/720] updated version added for source into the docs

---
 system/firewalld.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/system/firewalld.py b/system/firewalld.py
index 0348c6ecb47..677ced8aa78 100644
--- a/system/firewalld.py
+++ b/system/firewalld.py
@@ -46,6 +46,7 @@ options:
       - 'The source/network you would like to add/remove to/from firewalld'
     required: false
    default: null
+    version_added: "2.0"
  zone:
    description:
      - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).'

From f14317f7f54e7cc873f284c1ea82927b6bd45820 Mon Sep 17 00:00:00 2001
From: tylerturk
Date: Mon, 29 Jun 2015 07:51:58 -0500
Subject: [PATCH 719/720] Fix documentation bug

---
 system/gluster_volume.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/system/gluster_volume.py b/system/gluster_volume.py
index 7719006502d..ff1ce9831db 100644
--- a/system/gluster_volume.py
+++ b/system/gluster_volume.py
@@ -115,7 +115,7 @@ EXAMPLES = """
   gluster_volume: state=present name=test1 options='{performance.cache-size: 256MB}'
 
 - name: start gluster volume
-  gluster_volume: status=started name=test1
+  gluster_volume: state=started name=test1
 
 - name: limit usage
   gluster_volume: state=present name=test1 directory=/foo quota=20.0MB

From 86d5ca411c2e8d770515b544602c378a39ac7471 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 29 Jun 2015 13:09:11 -0700
Subject: [PATCH 720/720] Add testing documentation to travis

---
 .travis.yml  |  1 +
 test-docs.sh | 21 +++++++++++++++++++++
 2 files changed, 22 insertions(+)
 create mode 100755 test-docs.sh

diff --git a/.travis.yml b/.travis.yml
index 84ec3a0983a..d43c6b3b3fa 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -13,3 +13,4 @@ script:
   - python2.4 -m compileall -fq -x 'cloud/|monitoring/zabbix.*\.py|/layman\.py|/maven_artifact\.py|clustering/consul.*\.py|notification/pushbullet\.py' .
   - python2.6 -m compileall -fq .
   - python2.7 -m compileall -fq .
+  - ./test-docs.sh extras

diff --git a/test-docs.sh b/test-docs.sh
new file mode 100755
index 00000000000..76297fbada6
--- /dev/null
+++ b/test-docs.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+set -x
+
+CHECKOUT_DIR=".ansible-checkout"
+MOD_REPO="$1"
+
+# Hidden file to avoid the module_formatter recursing into the checkout
+git clone https://github.com/ansible/ansible "$CHECKOUT_DIR"
+cd "$CHECKOUT_DIR"
+git submodule update --init
+rm -rf "lib/ansible/modules/$MOD_REPO"
+ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO"
+
+pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx
+
+. ./hacking/env-setup
+PAGER=/bin/cat bin/ansible-doc -l
+if [ $? -ne 0 ] ; then
+    exit 1
+fi
+make -C docsite
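
For local experimentation with the documentation check added in the last patch, something like the sketch below should approximate what the Travis job does. The export of TRAVIS_BUILD_DIR and the ~/ansible-modules-extras checkout path are assumptions here (Travis normally sets that variable itself); the invocation otherwise mirrors the .travis.yml entry and the test-docs.sh script above.

    # Hypothetical local run of the doc check; paths are illustrative only.
    export TRAVIS_BUILD_DIR="$HOME/ansible-modules-extras"   # normally provided by Travis CI
    cd "$TRAVIS_BUILD_DIR"
    ./test-docs.sh extras                                    # same arguments as the .travis.yml script entry

Note that the script clones the full ansible/ansible repository and builds the docsite, so the first run can take several minutes.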